Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h                | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c            |  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c           | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c          |  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c          |  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c         | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c           |  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c       |  1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c  | 96
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c            |  5
10 files changed, 150 insertions, 40 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b0e29e342401..e809249500e1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -509,6 +509,7 @@ enum ice_pf_flags {
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_UNPLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
@@ -955,16 +956,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- /* We can directly unplug aux device here only if the flag bit
- * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
- * could race with ice_plug_aux_dev() called from
- * ice_service_task(). In this case we only clear that bit now and
- * aux device will be unplugged later once ice_plug_aux_device()
- * called from ice_service_task() finishes (see ice_service_task()).
+ /* Defer the unplug to the service task to avoid taking the RTNL lock,
* and clear the PLUG bit so that a pending plug request doesn't interfere.
*/
- if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
- ice_unplug_aux_dev(pf);
-
+ clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
+ set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0f52ea38b6f3..450317dfcca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -291,6 +291,7 @@ static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
struct ice_vsi_ctx *ctxt;
int status;
+ ice_fltr_remove_all(vsi);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
@@ -2892,7 +2893,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
- ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 567694bf098b..0d8b8c6f9bd3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2316,18 +2316,15 @@ static void ice_service_task(struct work_struct *work)
}
}
- if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
- /* Plug aux device per request */
- ice_plug_aux_dev(pf);
+ /* Unplug the aux device if requested. If an unplug request came in
+ * while a plug request was being processed, this handles it.
+ */
+ if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
- /* Mark plugging as done but check whether unplug was
- * requested during ice_plug_aux_dev() call
- * (e.g. from ice_clear_rdma_cap()) and if so then
- * unplug aux device.
- */
- if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
- ice_unplug_aux_dev(pf);
- }
+ /* Plug aux device per request */
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
struct iidc_event *event;
@@ -4644,6 +4641,12 @@ static int ice_start_eth(struct ice_vsi *vsi)
return err;
}
+static void ice_stop_eth(struct ice_vsi *vsi)
+{
+ ice_fltr_remove_all(vsi);
+ ice_vsi_close(vsi);
+}
+
static int ice_init_eth(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -5132,7 +5135,7 @@ void ice_unload(struct ice_pf *pf)
{
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- ice_vsi_close(ice_get_main_vsi(pf));
+ ice_stop_eth(ice_get_main_vsi(pf));
ice_vsi_decfg(ice_get_main_vsi(pf));
ice_deinit_dev(pf);
}
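
For readers tracing the PLUG/UNPLUG flag handling across the ice.h and ice_main.c hunks above, here is a minimal, standalone C sketch of the deferral pattern. Names and types are hypothetical simplifications (plain bools in place of the driver's atomic set_bit/clear_bit/test_and_clear_bit on pf->flags), not the driver's API:

/* Minimal sketch of deferring aux-device work to a service task via flags. */
#include <stdbool.h>
#include <stdio.h>

struct pf_state {
	bool plug_requested;    /* stands in for ICE_FLAG_PLUG_AUX_DEV */
	bool unplug_requested;  /* stands in for ICE_FLAG_UNPLUG_AUX_DEV */
};

/* Caller side: may run under locks, so it only records the request. */
static void clear_rdma_cap(struct pf_state *s)
{
	s->plug_requested = false;   /* cancel any pending plug */
	s->unplug_requested = true;  /* defer the unplug to the service task */
}

/* Service task side: handles unplug before plug, as in the hunk above. */
static void service_task(struct pf_state *s)
{
	if (s->unplug_requested) {
		s->unplug_requested = false;
		printf("unplug aux device\n");
	}
	if (s->plug_requested) {
		s->plug_requested = false;
		printf("plug aux device\n");
	}
}

int main(void)
{
	struct pf_state s = { .plug_requested = true };

	clear_rdma_cap(&s);  /* the pending plug is superseded */
	service_task(&s);    /* prints only "unplug aux device" */
	return 0;
}
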
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 4eca8d195ef0..b7682de0ae05 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -2788,7 +2788,7 @@ static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
- struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+ struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
struct ice_hw *hw = pi->hw;
int status = 0;
@@ -2806,11 +2806,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
if (old_agg_info && old_agg_info != agg_info) {
struct ice_sched_agg_vsi_info *vtmp;
- list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+ list_for_each_entry_safe(iter, vtmp,
&old_agg_info->agg_vsi_list,
list_entry)
- if (old_agg_vsi_info->vsi_handle == vsi_handle)
+ if (iter->vsi_handle == vsi_handle) {
+ old_agg_vsi_info = iter;
break;
+ }
}
/* check if entry already exist */
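
The ice_sched.c change above introduces a dedicated iterator so that old_agg_vsi_info stays NULL when no matching entry is found, instead of reusing the loop cursor, which never ends up NULL after an exhausted list walk. A standalone C sketch of the same idiom, using plain pointers and hypothetical types rather than the kernel's list_for_each_entry_safe():

#include <stddef.h>
#include <stdio.h>

struct agg_vsi_info {
	unsigned int vsi_handle;
	struct agg_vsi_info *next;
};

static struct agg_vsi_info *
find_vsi_info(struct agg_vsi_info *head, unsigned int vsi_handle)
{
	struct agg_vsi_info *iter, *found = NULL;

	for (iter = head; iter; iter = iter->next) {
		if (iter->vsi_handle == vsi_handle) {
			found = iter;  /* only assign the result on a match */
			break;
		}
	}
	return found;  /* NULL when no entry matched */
}

int main(void)
{
	struct agg_vsi_info second = { .vsi_handle = 7, .next = NULL };
	struct agg_vsi_info first = { .vsi_handle = 3, .next = &second };

	printf("match: %p\n", (void *)find_vsi_info(&first, 7));
	printf("no match: %p\n", (void *)find_vsi_info(&first, 9));
	return 0;
}
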
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 96a64c25e2ef..0cc05e54a781 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1341,15 +1341,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
if (ice_is_eswitch_mode_switchdev(pf)) {
dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
return -EOPNOTSUPP;
}
- vf = ice_get_vf_by_id(pf, vf_id);
- if (!vf)
- return -EINVAL;
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 61f844d22512..46b36851af46 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
- struct ice_vsi_ctx *ctx;
+ struct ice_vsi_ctx *ctx, *cached_ctx;
+ int status;
+
+ cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!cached_ctx)
+ return -ENOENT;
- ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -EIO;
+ return -ENOMEM;
+
+ ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
+ ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
+ ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
if (enable)
ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
- return ice_update_vsi(hw, vsi_handle, ctx, NULL);
+ status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
+ if (!status) {
+ cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
+ cached_ctx->info.valid_sections |= ctx->info.valid_sections;
+ }
+
+ kfree(ctx);
+ return status;
}
/**
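
The ice_cfg_rdma_fltr() rework above stops mutating the cached VSI context in place; it updates a heap-allocated copy and folds the result back into the cache only after the firmware update succeeds. A standalone sketch of that copy-modify-commit shape, with hypothetical types, flag values, and apply step standing in for the ice structures and admin-queue call:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define Q_OPT_SECTION_VALID 0x0001  /* stand-in for ICE_AQ_VSI_PROP_Q_OPT_VALID */
#define Q_OPT_PE_FLTR_EN    0x80    /* stand-in for ICE_AQ_VSI_Q_OPT_PE_FLTR_EN */

struct vsi_props {
	unsigned short valid_sections;
	unsigned char q_opt_flags;
};

static int apply_to_hw(const struct vsi_props *p)
{
	(void)p;
	return 0;  /* pretend the device accepted the update */
}

static int cfg_rdma_fltr(struct vsi_props *cached, bool enable)
{
	struct vsi_props *scratch;
	int err;

	scratch = calloc(1, sizeof(*scratch));
	if (!scratch)
		return -1;

	/* Start from the cached queue options; mark only that section valid. */
	scratch->q_opt_flags = cached->q_opt_flags;
	scratch->valid_sections = Q_OPT_SECTION_VALID;

	if (enable)
		scratch->q_opt_flags |= Q_OPT_PE_FLTR_EN;
	else
		scratch->q_opt_flags &= ~Q_OPT_PE_FLTR_EN;

	err = apply_to_hw(scratch);
	if (!err) {
		/* Commit to the cache only after the device accepted the change. */
		cached->q_opt_flags = scratch->q_opt_flags;
		cached->valid_sections |= scratch->valid_sections;
	}

	free(scratch);
	return err;
}

int main(void)
{
	struct vsi_props cached = { 0 };

	cfg_rdma_fltr(&cached, true);
	printf("q_opt_flags=0x%02x\n", cached.q_opt_flags);
	return 0;
}
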
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dfd22862e926..4fcf2d07eb85 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -938,6 +938,7 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
* @ntc: index of the next-to-clean element
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
@@ -1026,7 +1027,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
@@ -1210,6 +1210,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
if (++ntc == cnt)
ntc = 0;
+ rx_ring->first_desc = ntc;
continue;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7bc5aa340c7d..c8322fb6f2b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -438,6 +438,7 @@ busy:
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
* @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
* @first_idx: index to write from, passed in by the caller
*
* This function bumps XDP Tx tail and/or flush redirect map, and
* should be called when a batch of packets has been processed in the
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index e6ef6b303222..daa6a1e894cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -542,6 +542,87 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
}
/**
+ * ice_vc_fdir_reset_cnt_all - reset all FDIR filter counters for this VF
+ * @fdir: pointer to the VF FDIR structure
+ */
+static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+{
+ enum ice_fltr_ptype flow;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+ flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
+}
+
+/**
+ * ice_vc_fdir_has_prof_conflict
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ *
+ * Check whether @conf conflicts with the profiles of existing filters
+ *
+ * Return: true if a conflicting profile is found, false otherwise.
+ */
+static bool
+ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct ice_fdir_fltr *desc;
+
+ list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+ struct virtchnl_fdir_fltr_conf *existing_conf;
+ enum ice_fltr_ptype flow_type_a, flow_type_b;
+ struct ice_fdir_fltr *a, *b;
+
+ existing_conf = to_fltr_conf_from_desc(desc);
+ a = &existing_conf->input;
+ b = &conf->input;
+ flow_type_a = a->flow_type;
+ flow_type_b = b->flow_type;
+
+ /* No need to compare two rules with different tunnel types or
+ * with the same protocol type.
+ */
+ if (existing_conf->ttype != conf->ttype ||
+ flow_type_a == flow_type_b)
+ continue;
+
+ switch (flow_type_a) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+/**
* ice_vc_fdir_write_flow_prof
* @vf: pointer to the VF structure
* @flow: filter flow type
@@ -677,6 +758,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
enum ice_fltr_ptype flow;
int ret;
+ ret = ice_vc_fdir_has_prof_conflict(vf, conf);
+ if (ret) {
+ dev_dbg(dev, "Found flow profile conflict for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
flow = input->flow_type;
ret = ice_vc_fdir_alloc_prof(vf, flow);
if (ret) {
@@ -1798,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
- goto err_free_conf;
+ goto err_rem_entry;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
@@ -1807,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
vf->vf_id, ret);
- goto err_rem_entry;
+ goto err_clr_irq;
}
exit:
kfree(stat);
return ret;
-err_rem_entry:
+err_clr_irq:
ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
devm_kfree(dev, conf);
@@ -1924,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
spin_lock_init(&fdir->ctx_lock);
fdir->ctx_irq.flags = 0;
fdir->ctx_done.flags = 0;
+ ice_vc_fdir_reset_cnt_all(fdir);
}
/**
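
The relabelled error path in the ice_vc_add_fdir_fltr() hunk above follows the usual stacked-unwind convention: each failure point jumps to the label that tears down exactly what has been set up so far. A standalone C sketch of that convention, with hypothetical stand-in helpers rather than the driver's functions:

#include <stdio.h>

static int set_irq_ctx(void)    { return 0; }
static void clear_irq_ctx(void) { puts("clear irq ctx"); }
static int add_entry(void)      { return 0; }
static void remove_entry(void)  { puts("remove entry"); }
static int write_filter(void)   { return -1; /* simulate a HW write failure */ }

static int add_fdir_filter(void)
{
	int err;

	err = add_entry();
	if (err)
		goto err_out;

	err = set_irq_ctx();
	if (err)
		goto err_rem_entry;  /* only the entry exists so far */

	err = write_filter();
	if (err)
		goto err_clr_irq;    /* both the entry and the irq ctx exist */

	return 0;

err_clr_irq:
	clear_irq_ctx();
err_rem_entry:
	remove_entry();
err_out:
	return err;
}

int main(void)
{
	printf("add_fdir_filter() = %d\n", add_fdir_filter());
	return 0;
}
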
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 31565bbafa22..d1e489da7363 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -184,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
}
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
if (err)
@@ -200,10 +198,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);