Diffstat (limited to 'drivers/net/ethernet/intel')
19 files changed, 341 insertions, 98 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..10d7a982a5b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
 			"Cannot locate client instance close routine\n");
 		return;
 	}
+	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+		dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+		return;
+	}
 	cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
 	clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
 	i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
 			/* Remove failed client instance */
 			clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
 				  &cdev->state);
-			i40e_client_del_instance(pf);
 			return;
 		}
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 156e92c43780..e9cd0fa6a0d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 			 (struct in6_addr *)&ipv6_full_mask))
 			new_mask |= I40E_L3_V6_DST_MASK;
 		else if (ipv6_addr_any((struct in6_addr *)
-					&usr_ip6_spec->ip6src))
+					&usr_ip6_spec->ip6dst))
 			new_mask &= ~I40E_L3_V6_DST_MASK;
 		else
 			return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b36bf9c3e1e4..10c1e1ea83a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
 		break;
 	default:
-		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+		set_bit(__I40E_DOWN_REQUESTED, pf->state);
+		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
 		break;
 	}
 
@@ -6657,6 +6659,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
 			vsi->tc_seid_map[i] = ch->seid;
 		}
 	}
+
+	/* reset to reconfigure TX queue contexts */
+	i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
 	return ret;
 
 err_free:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6ba97a0166e..69e67eb6aea7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 
 	protocol = vlan_get_protocol(skb);
 
-	if (eth_p_mpls(protocol))
+	if (eth_p_mpls(protocol)) {
 		ip.hdr = skb_inner_network_header(skb);
-	else
+		l4.hdr = skb_checksum_start(skb);
+	} else {
 		ip.hdr = skb_network_header(skb);
-	l4.hdr = skb_checksum_start(skb);
+		l4.hdr = skb_transport_header(skb);
+	}
 
 	/* set the tx_flags to indicate the IP protocol type. this is
 	 * required so that checksum header computation below is accurate.
@@ -3686,7 +3688,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
 	u8 prio;
 
 	/* is DCB enabled at all? */
-	if (vsi->tc_config.numtc == 1)
+	if (vsi->tc_config.numtc == 1 ||
+	    i40e_is_tc_mqprio_enabled(vsi->back))
 		return netdev_pick_tx(netdev, skb, sb_dev);
 
 	prio = skb->priority;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
 static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
 {
 	enum iavf_status ret_code = 0;
+	int i;
 
 	if (hw->aq.asq.count > 0) {
 		/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
 	/* initialize base registers */
 	ret_code = iavf_config_asq_regs(hw);
 	if (ret_code)
-		goto init_adminq_free_rings;
+		goto init_free_asq_bufs;
 
 	/* success! */
 	hw->aq.asq.count = hw->aq.num_asq_entries;
 	goto init_adminq_exit;
 
+init_free_asq_bufs:
+	for (i = 0; i < hw->aq.num_asq_entries; i++)
+		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
 init_adminq_free_rings:
 	iavf_free_adminq_asq(hw);
 
@@ -383,6 +389,7 @@ init_adminq_exit:
 static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
 {
 	enum iavf_status ret_code = 0;
+	int i;
 
 	if (hw->aq.arq.count > 0) {
 		/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
 	/* initialize base registers */
 	ret_code = iavf_config_arq_regs(hw);
 	if (ret_code)
-		goto init_adminq_free_rings;
+		goto init_free_arq_bufs;
 
 	/* success! */
 	hw->aq.arq.count = hw->aq.num_arq_entries;
 	goto init_adminq_exit;
 
+init_free_arq_bufs:
+	for (i = 0; i < hw->aq.num_arq_entries; i++)
+		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
 init_adminq_free_rings:
 	iavf_free_adminq_arq(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 45d097a164ad..10aa99dfdcdb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2367,7 +2367,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
 	err = iavf_get_vf_config(adapter);
 	if (err == -EALREADY) {
 		err = iavf_send_vf_config_msg(adapter);
-		goto err_alloc;
+		goto err;
 	} else if (err == -EINVAL) {
 		/* We only get -EINVAL if the device is in a very bad
 		 * state or if we've been disabled for previous bad
@@ -2877,6 +2877,11 @@ static void iavf_reset_task(struct work_struct *work)
 	int i = 0, err;
 	bool running;
 
+	/* Detach interface to avoid subsequent NDO callbacks */
+	rtnl_lock();
+	netif_device_detach(netdev);
+	rtnl_unlock();
+
 	/* When device is being removed it doesn't make sense to run the reset
 	 * task, just return in such a case.
 	 */
@@ -2884,7 +2889,7 @@
 		if (adapter->state != __IAVF_REMOVE)
 			queue_work(iavf_wq, &adapter->reset_task);
 
-		return;
+		goto reset_finish;
 	}
 
 	while (!mutex_trylock(&adapter->client_lock))
@@ -2954,7 +2959,6 @@ continue_reset:
 
 	if (running) {
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		adapter->link_up = false;
 		iavf_napi_disable_all(adapter);
 	}
@@ -3084,14 +3088,21 @@ continue_reset:
 	mutex_unlock(&adapter->client_lock);
 	mutex_unlock(&adapter->crit_lock);
 
-	return;
+	goto reset_finish;
 reset_err:
+	if (running) {
+		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+		iavf_free_traffic_irqs(adapter);
+	}
+	iavf_disable_vf(adapter);
+
 	mutex_unlock(&adapter->client_lock);
 	mutex_unlock(&adapter->crit_lock);
-	if (running)
-		iavf_change_state(adapter, __IAVF_RUNNING);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-	iavf_close(netdev);
+reset_finish:
+	rtnl_lock();
+	netif_device_attach(netdev);
+	rtnl_unlock();
 }
 
 /**
@@ -4085,8 +4096,17 @@ static int iavf_open(struct net_device *netdev)
 		return -EIO;
 	}
 
-	while (!mutex_trylock(&adapter->crit_lock))
+	while (!mutex_trylock(&adapter->crit_lock)) {
+		/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
+		 * is already taken and iavf_open is called from an upper
+		 * device's notifier reacting on NETDEV_REGISTER event.
+		 * We have to leave here to avoid dead lock.
+		 */
+		if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+			return -EBUSY;
+
 		usleep_range(500, 1000);
+	}
 
 	if (adapter->state != __IAVF_DOWN) {
 		err = -EBUSY;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cc5b85afd437..841fa149c407 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
  *
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 {
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 }
 
 /**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
  *
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
+ * queue id. Reason for doing so is that queue vectors might have assigned more
+ * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
+ * carries a pointer to one of these XDP rings for its own purposes, such as
+ * handling XDP_TX action, therefore we can piggyback here on the
+ * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
  */
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 {
-	struct ice_vsi *vsi = ring->vsi;
-	u16 qid;
+	struct ice_tx_ring *ring;
 
-	qid = ring->q_index - vsi->alloc_txq;
+	ring = vsi->rx_rings[qid]->xdp_ring;
+	if (!ring)
+		return;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-		return NULL;
+	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+		ring->xsk_pool = NULL;
+		return;
+	}
 
-	return xsk_get_pool_from_qid(vsi->netdev, qid);
+	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 136d7911adb4..1e3243808178 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -7,18 +7,6 @@
 #include "ice_dcb_lib.h"
 #include "ice_sriov.h"
 
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
-	rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
-	return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
-	rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
-	return !!rx_ring->rx_buf;
-}
-
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -519,11 +507,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 					 ring->q_index, ring->q_vector->napi.napi_id);
 
-		kfree(ring->rx_buf);
 		ring->xsk_pool = ice_xsk_pool(ring);
 		if (ring->xsk_pool) {
-			if (!ice_alloc_rx_buf_zc(ring))
-				return -ENOMEM;
 			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
 			ring->rx_buf_len =
@@ -538,8 +523,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 				 ring->q_index);
 		} else {
-			if (!ice_alloc_rx_buf(ring))
-				return -ENOMEM;
 			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
 				/* coverity[check_return] */
 				xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
 	int result;
 
 	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
-	if (result)
+	if (result && result != -EEXIST)
 		dev_err(ice_pf_to_dev(pf),
 			"Error setting promisc mode on VSI %i (rc=%d)\n",
 			vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
 	int result;
 
 	result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
-	if (result)
+	if (result && result != -EEXIST)
 		dev_err(ice_pf_to_dev(pf),
 			"Error clearing promisc mode on VSI %i (rc=%d)\n",
 			vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 	int result;
 
 	result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
-	if (result)
+	if (result && result != -EEXIST)
 		dev_err(ice_pf_to_dev(pf),
 			"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
 			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 	int result;
 
 	result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
-	if (result)
+	if (result && result != -EEXIST)
 		dev_err(ice_pf_to_dev(pf),
 			"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
 			ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f9aed0..0c4ec9264071 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1986,8 +1986,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
 	if (ret)
 		return ret;
 
-	ice_for_each_xdp_txq(vsi, i)
-		vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+	ice_for_each_rxq(vsi, i)
+		ice_tx_xsk_pool(vsi, i);
 
 	return ret;
 }
@@ -3181,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 
 	pf = vsi->back;
 	vtype = vsi->type;
-	if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+	if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
 		return -EINVAL;
 
 	ice_vsi_init_vlan_ops(vsi);
@@ -4062,7 +4062,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
 	if (err && err != -EEXIST)
 		return err;
 
-	return 0;
+	/* when deleting the last VLAN filter, make sure to disable the VLAN
+	 * promisc mode so the filter isn't left by accident
+	 */
+	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index eb40526ee179..8c30eea61b6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
 						  promisc_m, 0);
 	}
+	if (status && status != -EEXIST)
+		return status;
 
-	return status;
+	return 0;
 }
 
 /**
@@ -2579,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		if (ice_setup_tx_ring(xdp_ring))
 			goto free_xdp_rings;
 		ice_set_ring_xdp(xdp_ring);
-		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
 		spin_lock_init(&xdp_ring->tx_lock);
 		for (j = 0; j < xdp_ring->count; j++) {
 			tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 		}
 	}
 
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key))
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		else
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
-	}
-
 	return 0;
 
 free_xdp_rings:
@@ -2683,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 		xdp_rings_rem -= xdp_rings_per_v;
 	}
 
+	ice_for_each_rxq(vsi, i) {
+		if (static_key_enabled(&ice_xdp_locking_key)) {
+			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+		} else {
+			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+			struct ice_tx_ring *ring;
+
+			ice_for_each_tx_ring(ring, q_vector->tx) {
+				if (ice_ring_is_xdp(ring)) {
+					vsi->rx_rings[i]->xdp_ring = ring;
+					break;
+				}
+			}
+		}
+		ice_tx_xsk_pool(vsi, i);
+	}
+
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
@@ -2887,10 +2898,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			if (xdp_ring_err)
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
 		}
+		/* reallocate Rx queues that are used for zero-copy */
+		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+		if (xdp_ring_err)
+			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+		/* reallocate Rx queues that were used for zero-copy */
+		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+		if (xdp_ring_err)
+			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
 	} else {
 		/* safe to call even when prog == vsi->xdp_prog as
 		 * dev_xdp_install in net/core/dev.c incremented prog's
@@ -3573,6 +3592,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
 		usleep_range(1000, 2000);
 
+	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
+	if (ret) {
+		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+			   vsi->vsi_num);
+		vsi->current_netdev_flags |= IFF_ALLMULTI;
+	}
+
 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
 	/* Make sure VLAN delete is successful before updating VLAN
@@ -3886,7 +3913,7 @@ static int ice_init_pf(struct ice_pf *pf)
 
 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
 	if (!pf->avail_rxqs) {
-		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+		bitmap_free(pf->avail_txqs);
 		pf->avail_txqs = NULL;
 		return -ENOMEM;
 	}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 262e553e3b58..3808034f7e7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4445,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 		goto free_fltr_list;
 
 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+		/* Avoid enabling or disabling VLAN zero twice when in double
+		 * VLAN mode
+		 */
+		if (ice_is_dvm_ena(hw) &&
+		    list_itr->fltr_info.l_data.vlan.tpid == 0)
+			continue;
+
 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
 		if (rm_vlan_promisc)
 			status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
 		else
 			status = ice_set_vsi_promisc(hw, vsi_handle,
 						     promisc_mask, vlan_id);
-		if (status)
+		if (status && status != -EEXIST)
 			break;
 	}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e37f5e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 
 	if (ice_is_vf_disabled(vf)) {
 		vsi = ice_get_vf_vsi(vf);
-		if (WARN_ON(!vsi))
+		if (!vsi) {
+			dev_dbg(dev, "VF is already removed\n");
 			return -EINVAL;
+		}
 		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
 		ice_vsi_stop_all_rx_rings(vsi);
 		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
 static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
 {
 	struct ice_vsi_vlan_ops *vlan_ops;
-	int err;
+	int err = 0;
 
 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
-	err = vlan_ops->ena_tx_filtering(vsi);
-	if (err)
-		return err;
+	/* Allow VF with VLAN 0 only to send all tagged traffic */
+	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+		err = vlan_ops->ena_tx_filtering(vsi);
+		if (err)
+			return err;
+	}
 
 	return ice_cfg_mac_antispoof(vsi, true);
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 094e3c97a1ea..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 
 			/* Enable VLAN filtering on first non-zero VLAN */
 			if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+				if (vf->spoofchk) {
+					status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+					if (status) {
+						v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+						dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+							vid, status);
+						goto error_param;
+					}
+				}
 				if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 			}
 
 			/* Disable VLAN filtering when only VLAN 0 is left */
-			if (!ice_vsi_has_non_zero_vlans(vsi))
+			if (!ice_vsi_has_non_zero_vlans(vsi)) {
+				vsi->inner_vlan_ops.dis_tx_filtering(vsi);
 				vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+			}
 
 			if (vlan_promisc)
 				ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 
 			if (vlan_promisc)
 				ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+			/* Disable VLAN filtering when only VLAN 0 is left */
+			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+				if (err)
+					return err;
+			}
 		}
 
 		vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 			/* no support for VLAN promiscuous on inner VLAN unless
 			 * we are in Single VLAN Mode (SVM)
 			 */
-			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
-				ice_vf_dis_vlan_promisc(vsi, &vlan);
+			if (!ice_is_dvm_ena(&vsi->back->hw)) {
+				if (vlan_promisc)
+					ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+				/* Disable VLAN filtering when only VLAN 0 is left */
+				if (!ice_vsi_has_non_zero_vlans(vsi)) {
+					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+					if (err)
+						return err;
+				}
+			}
 		}
 	}
 
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 				if (err)
 					return err;
 			}
+
+			/* Enable VLAN filtering on first non-zero VLAN */
+			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+				if (err)
+					return err;
+			}
 		}
 
 		vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 			/* no support for VLAN promiscuous on inner VLAN unless
 			 * we are in Single VLAN Mode (SVM)
 			 */
-			if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
-				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
-				if (err)
-					return err;
+			if (!ice_is_dvm_ena(&vsi->back->hw)) {
+				if (vlan_promisc) {
+					err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+					if (err)
+						return err;
+				}
+
+				/* Enable VLAN filtering on first non-zero VLAN */
+				if (vf->spoofchk && vlan.vid) {
+					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+					if (err)
+						return err;
+				}
 			}
 		}
 	}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..03ce85f6e6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
 	if (err)
 		return err;
+	ice_clean_rx_ring(rx_ring);
 	ice_qvec_toggle_napi(vsi, q_vector, false);
 	ice_qp_clean_rings(vsi, q_idx);
@@ -243,7 +244,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 		if (err)
 			goto free_buf;
 		ice_set_ring_xdp(xdp_ring);
-		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+		ice_tx_xsk_pool(vsi, q_idx);
 	}
 
 	err = ice_vsi_cfg_rxq(rx_ring);
@@ -317,6 +318,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 }
 
 /**
+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try allocating memory and return ENOMEM, if failed to allocate.
+ * If allocation was successful, substitute buffer with allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+					  sizeof(*rx_ring->rx_buf);
+	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+	if (!sw_ring)
+		return -ENOMEM;
+
+	if (pool_present) {
+		kfree(rx_ring->rx_buf);
+		rx_ring->rx_buf = NULL;
+		rx_ring->xdp_buf = sw_ring;
+	} else {
+		kfree(rx_ring->xdp_buf);
+		rx_ring->xdp_buf = NULL;
+		rx_ring->rx_buf = sw_ring;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+	struct ice_rx_ring *rx_ring;
+	unsigned long q;
+
+	for_each_set_bit(q, vsi->af_xdp_zc_qps,
+			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+		rx_ring = vsi->rx_rings[q];
+		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
  * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
  * @vsi: Current VSI
  * @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -329,6 +386,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	bool if_running, pool_present = !!pool;
 	int ret = 0, pool_failure = 0;
 
+	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+		pool_failure = -EINVAL;
+		goto failure;
+	}
+
 	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
 	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
 		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -339,11 +402,17 @@
 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {
+		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
 		ret = ice_qp_dis(vsi, qid);
 		if (ret) {
 			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
 			goto xsk_pool_if_up;
 		}
+
+		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+		if (ret)
+			goto xsk_pool_if_up;
 	}
 
 	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
@@ -353,7 +422,7 @@ xsk_pool_if_up:
 	if (if_running) {
 		ret = ice_qp_ena(vsi, qid);
 		if (!ret && pool_present)
-			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
 		else if (ret)
 			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
 	}
@@ -944,13 +1013,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
 	if (!ice_is_xdp_ena_vsi(vsi))
 		return -EINVAL;
 
-	if (queue_id >= vsi->num_txq)
+	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
 		return -EINVAL;
 
-	if (!vsi->xdp_rings[queue_id]->xsk_pool)
-		return -EINVAL;
+	ring = vsi->rx_rings[queue_id]->xdp_ring;
 
-	ring = vsi->xdp_rings[queue_id];
+	if (!ring->xsk_pool)
+		return -EINVAL;
 
 	/* The idea here is that if NAPI is running, mark a miss, so
 	 * it will run again. If not, trigger an interrupt and
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 21faec8e97db..4edbe81eb646 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -27,6 +27,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
 bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
 #else
 static inline bool
 ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
@@ -72,5 +73,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
 
 static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
 static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+		   bool __always_unused zc)
+{
+	return 0;
+}
 #endif /* CONFIG_XDP_SOCKETS */
 #endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
 	struct igb_mac_addr *mac_table;
 	struct vf_mac_filter vf_macs;
 	struct vf_mac_filter *vf_mac_list;
+	/* lock for VF resources */
+	spinlock_t vfs_lock;
 };
 
 /* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d8b836a85cc3..2796e81d2726 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long flags;
 
 	/* reclaim resources allocated to VFs */
 	if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
 			pci_disable_sriov(pdev);
 			msleep(500);
 		}
-
+		spin_lock_irqsave(&adapter->vfs_lock, flags);
 		kfree(adapter->vf_mac_list);
 		adapter->vf_mac_list = NULL;
 		kfree(adapter->vf_data);
 		adapter->vf_data = NULL;
 		adapter->vfs_allocated_count = 0;
+		spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
 		wrfl();
 		msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
 	igb_release_hw_control(adapter);
 
 #ifdef CONFIG_PCI_IOV
+	rtnl_lock();
 	igb_disable_sriov(pdev);
+	rtnl_unlock();
 #endif
 
 	unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
 	spin_lock_init(&adapter->nfc_lock);
 	spin_lock_init(&adapter->stats64_lock);
+
+	/* init spinlock to avoid concurrency of VF resources */
+	spin_lock_init(&adapter->vfs_lock);
 #ifdef CONFIG_PCI_IOV
 	switch (hw->mac.type) {
 	case e1000_82576:
@@ -7958,8 +7965,10 @@ unlock:
 static void igb_msg_task(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long flags;
 	u32 vf;
 
+	spin_lock_irqsave(&adapter->vfs_lock, flags);
 	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
 		/* process any reset requests */
 		if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
 		if (!igb_check_for_ack(hw, vf))
 			igb_rcv_ack_from_vf(adapter, vf);
 	}
+	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9f06896a049b..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 	struct cyclecounter cc;
 	unsigned long flags;
 	u32 incval = 0;
-	u32 tsauxc = 0;
 	u32 fuse0 = 0;
 
 	/* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_x550em_a:
 	case ixgbe_mac_X550:
 		cc.read = ixgbe_ptp_read_X550;
-
-		/* enable SYSTIME counter */
-		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
-		IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
-		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
-		tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
-		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
-				tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
-		IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
-		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
-		IXGBE_WRITE_FLUSH(hw);
 		break;
 	case ixgbe_mac_X540:
 		cc.read = ixgbe_ptp_read_82599;
@@ -1293,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 tsauxc;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_X550:
+		tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+		/* Reset SYSTIME registers to 0 */
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+		/* Reset interrupt settings */
+		IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+		/* Activate the SYSTIME counter */
+		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+				tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+		break;
+	case ixgbe_mac_X540:
+	case ixgbe_mac_82599EB:
+		/* Reset SYSTIME registers to 0 */
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+		break;
+	default:
+		/* Other devices aren't supported */
+		return;
+	};
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
  * ixgbe_ptp_reset
  * @adapter: the ixgbe private board structure
  *
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
 
 	ixgbe_ptp_start_cyclecounter(adapter);
 
+	ixgbe_ptp_init_systime(adapter);
+
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 	timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
 			 ktime_to_ns(ktime_get_real()));