Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile                 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h                  110
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h       192
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c           335
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h            38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c           10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h            4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c             1392
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h               179
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c           551
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h            61
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c           366
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h         24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h          56
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c               768
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h                13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c              824
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c                 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c             273
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h               4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h              1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c            524
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h             48
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c              706
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h               29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h              102
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c       720
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h        28
28 files changed, 5897 insertions(+), 1468 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index e5d6f684437e..2d140ba83781 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,3 +17,4 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 89440775aea1..792e6e42030e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
+#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
@@ -42,10 +43,21 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
-#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
+/* set default number of Rx/Tx descriptors to the minimum between
+ * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+ */
+#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(struct ice_tx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
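The comment above only states the intent of ICE_DFLT_NUM_RX_DESC/ICE_DFLT_NUM_TX_DESC. A standalone sketch of the arithmetic, assuming a 4 KiB page, a 32-byte Rx descriptor and a 16-byte Tx descriptor (the sizes the divisions above imply), works out to 128 Rx and 256 Tx descriptors:

/* Standalone sketch of the ICE_DFLT_NUM_*_DESC arithmetic. The page and
 * descriptor sizes below are assumptions for illustration only.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096u
#define SKETCH_RX_DESC_SIZE	32u	/* sizeof(union ice_32byte_rx_desc) */
#define SKETCH_TX_DESC_SIZE	16u	/* sizeof(struct ice_tx_desc) */
#define SKETCH_REQ_DESC_MULT	32u	/* ICE_REQ_DESC_MULTIPLE */
#define SKETCH_MAX_NUM_DESC	8160u	/* ICE_MAX_NUM_DESC */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int rx = MIN(SKETCH_MAX_NUM_DESC,
			      ALIGN_UP(SKETCH_PAGE_SIZE / SKETCH_RX_DESC_SIZE,
				       SKETCH_REQ_DESC_MULT));
	unsigned int tx = MIN(SKETCH_MAX_NUM_DESC,
			      ALIGN_UP(SKETCH_PAGE_SIZE / SKETCH_TX_DESC_SIZE,
				       SKETCH_REQ_DESC_MULT));

	printf("default Rx descriptors: %u\n", rx);	/* 128 on a 4 KiB page */
	printf("default Tx descriptors: %u\n", tx);	/* 256 on a 4 KiB page */
	return 0;
}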
@@ -71,6 +83,8 @@ extern const char ice_drv_ver[];
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
@@ -114,6 +128,23 @@ extern const char ice_drv_ver[];
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
+ ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
+#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -123,7 +154,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* TX map */
+ u8 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -134,7 +165,7 @@ struct ice_res_tracker {
};
struct ice_qs_cfg {
- struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */
+ struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
unsigned long *pf_map;
unsigned long pf_map_size;
unsigned int q_count;
@@ -224,6 +255,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
+ u16 ethtype; /* Ethernet protocol for pause frame */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -247,6 +280,7 @@ struct ice_vsi {
u8 irqs_ready;
u8 current_isup; /* Sync 'link up' logging */
u8 stat_offsets_loaded;
+ u8 vlan_ena;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -257,26 +291,34 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
- u16 num_desc;
+ u16 num_rx_desc;
+ u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
struct ice_vsi *vsi;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- struct ice_ring_container rx;
- struct ice_ring_container tx;
- struct irq_affinity_notify affinity_notify;
+
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of Tx rings in vector */
+ u16 reg_idx;
u8 num_ring_rx; /* total number of Rx rings in vector */
- char name[ICE_INT_NAME_STR_LEN];
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
*/
u8 intrl;
+
+ struct napi_struct napi;
+
+ struct ice_ring_container rx;
+ struct ice_ring_container tx;
+
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
+ char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -285,7 +327,11 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
+ ICE_FLAG_DCB_CAPABLE,
+ ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ ICE_FLAG_DISABLE_FW_LLDP,
+ ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -324,8 +370,8 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan Tx queues setup */
- u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 num_lan_tx; /* num LAN Tx queues setup */
+ u16 num_lan_rx; /* num LAN Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -339,6 +385,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+#ifdef CONFIG_DCB
+ u16 dcbx_cap;
+#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@@ -351,14 +400,15 @@ struct ice_netdev_priv {
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
- * @hw: pointer to hw struct
- * @vsi: pointer to vsi struct, can be NULL
+ * @hw: pointer to HW struct
+ * @vsi: pointer to VSI struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
-static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
- struct ice_q_vector *q_vector)
+static inline void
+ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
+ struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
@@ -374,10 +424,24 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
-static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+/**
+ * ice_find_vsi_by_type - Find and return VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ */
+static inline struct ice_vsi *
+ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
- vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
- vsi->tc_cfg.numtc = 1;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
}
void ice_set_ethtool_ops(struct net_device *netdev);
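A minimal caller sketch for the new ice_find_vsi_by_type() helper; the wrapper function is hypothetical, while ICE_VSI_PF and ice_print_link_msg() come from the driver itself:

/* Hypothetical helper: log the link state of the PF VSI, if one exists. */
static void example_log_pf_link(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);

	if (vsi)
		ice_print_link_msg(vsi, true);
}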
@@ -388,5 +452,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
+#endif /* CONFIG_DCB */
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 242c78469181..6ef083002f5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -62,7 +62,7 @@ struct ice_aqc_req_res {
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin id of the SDP */
+ /* For SDP: pin ID of the SDP */
__le32 res_number;
/* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
__le16 status;
@@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
__le32 teid[1];
};
+/* Query Port ETS (indirect 0x040E)
+ *
+ * This indirect command is used to query port TC node configuration.
+ */
+struct ice_aqc_query_port_ets {
+ __le32 port_teid;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_port_ets_elem {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ /* 3 bits for UP per TC 0-7, 4th byte reserved */
+ __le32 up2tc;
+ u8 tc_bw_share[8];
+ __le32 port_eir_prof_id;
+ __le32 port_cir_prof_id;
+ /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
+ __le32 tc_node_prio;
+#define ICE_TC_NODE_PRIO_S 0x4
+ u8 reserved1[4];
+ __le32 tc_node_teid[8]; /* Used for response, reserved in command */
+};
+
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -953,8 +979,9 @@ struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
-#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
-#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
@@ -1023,7 +1050,7 @@ struct ice_aqc_get_link_status_data {
u8 ext_info;
#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
- /* Port TX Suspended */
+ /* Port Tx Suspended */
#define ICE_AQ_LINK_TX_S 2
#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
#define ICE_AQ_LINK_TX_ACTIVE 0
@@ -1119,9 +1146,9 @@ struct ice_aqc_nvm {
};
/**
- * Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to PF command (indirect 0x0801) ID is only used by PF
*
- * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to VF command (indirect 0x0802) ID is only used by PF
*
*/
struct ice_aqc_pf_vf_msg {
@@ -1131,6 +1158,126 @@ struct ice_aqc_pf_vf_msg {
__le32 addr_low;
};
+/* Get LLDP MIB (indirect 0x0A00)
+ * Note: This is also used by the LLDP MIB Change Event (0x0A01)
+ * as the format is the same.
+ */
+struct ice_aqc_lldp_get_mib {
+ u8 type;
+#define ICE_AQ_LLDP_MIB_TYPE_S 0
+#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
+#define ICE_AQ_LLDP_MIB_LOCAL 0
+#define ICE_AQ_LLDP_MIB_REMOTE 1
+#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
+#define ICE_AQ_LLDP_BRID_TYPE_S 2
+#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
+#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
+#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
+/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
+#define ICE_AQ_LLDP_TX_S 0x4
+#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
+#define ICE_AQ_LLDP_TX_ACTIVE 0
+#define ICE_AQ_LLDP_TX_SUSPENDED 1
+#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
+ * and in the LLDP MIB Change Event (0x0A01). They are valid for the
+ * Get LLDP MIB (0x0A00) response only.
+ */
+ u8 reserved1;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Configure LLDP MIB Change Event (direct 0x0A01) */
+/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
+struct ice_aqc_lldp_set_mib_change {
+ u8 command;
+#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+ u8 reserved[15];
+};
+
+/* Stop LLDP (direct 0x0A05) */
+struct ice_aqc_lldp_stop {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
+#define ICE_AQ_LLDP_AGENT_STOP 0x0
+#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
+#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
+ u8 reserved[15];
+};
+
+/* Start LLDP (direct 0x0A06) */
+struct ice_aqc_lldp_start {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_START BIT(0)
+#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
+ u8 reserved[15];
+};
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * The command uses the generic descriptor struct and
+ * returns the struct below as an indirect response.
+ */
+struct ice_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define ICE_AQC_CEE_APP_FCOE_S 0
+#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
+#define ICE_AQC_CEE_APP_ISCSI_S 3
+#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
+#define ICE_AQC_CEE_APP_FIP_S 8
+#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
+ __le32 tlv_status;
+#define ICE_AQC_CEE_PG_STATUS_S 0
+#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
+#define ICE_AQC_CEE_PFC_STATUS_S 3
+#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
+#define ICE_AQC_CEE_FCOE_STATUS_S 8
+#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
+#define ICE_AQC_CEE_ISCSI_STATUS_S 11
+#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
+#define ICE_AQC_CEE_FIP_STATUS_S 16
+#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
+ u8 reserved[12];
+};
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBX.
+ */
+struct ice_aqc_lldp_set_local_mib {
+ u8 type;
+#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
+#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
+#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
+#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
+#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBX.
+ * The same structure is used for the response, with the command field
+ * being used as the status field.
+ */
+struct ice_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+#define ICE_AQC_START_STOP_AGENT_M BIT(0)
+#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
+#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
+ u8 reserved[15];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
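A short sketch of how the type byte of the Get LLDP MIB descriptor is assembled from the masks defined above; it mirrors what ice_aq_get_lldp_mib() does later in this diff:

/* Sketch: build the 'type' byte of struct ice_aqc_lldp_get_mib for a
 * local-MIB query to the nearest bridge, using the masks defined above.
 */
static u8 example_lldp_mib_type(void)
{
	u8 type = ICE_AQ_LLDP_MIB_LOCAL & ICE_AQ_LLDP_MIB_TYPE_M;

	type |= (ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID << ICE_AQ_LLDP_BRID_TYPE_S) &
		ICE_AQ_LLDP_BRID_TYPE_M;
	return type;
}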
@@ -1144,6 +1291,9 @@ struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
struct ice_aqc_get_set_rss_keys {
u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
@@ -1185,7 +1335,7 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
-/* Add TX LAN Queues (indirect 0x0C30) */
+/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
u8 reserved[3];
@@ -1194,7 +1344,7 @@ struct ice_aqc_add_txqs {
__le32 addr_low;
};
-/* This is the descriptor of each queue entry for the Add TX LAN Queues
+/* This is the descriptor of each queue entry for the Add Tx LAN Queues
* command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
*/
struct ice_aqc_add_txqs_perq {
@@ -1206,7 +1356,7 @@ struct ice_aqc_add_txqs_perq {
struct ice_aqc_txsched_elem info;
};
-/* The format of the command buffer for Add TX LAN Queues (0x0C30)
+/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
* each struct ice_aqc_add_tx_qgrp is variable due
* to the variable number of queues in each group!
@@ -1218,7 +1368,7 @@ struct ice_aqc_add_tx_qgrp {
struct ice_aqc_add_txqs_perq txqs[1];
};
-/* Disable TX LAN Queues (indirect 0x0C31) */
+/* Disable Tx LAN Queues (indirect 0x0C31) */
struct ice_aqc_dis_txqs {
u8 cmd_type;
#define ICE_AQC_Q_DIS_CMD_S 0
@@ -1240,7 +1390,7 @@ struct ice_aqc_dis_txqs {
__le32 addr_low;
};
-/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
+/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
* contains the following structures, arrayed one after the
* other.
* Note: Since the q_id is 16 bits wide, if the
@@ -1387,8 +1537,15 @@ struct ice_aq_desc {
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
+ struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_lldp_get_mib lldp_get_mib;
+ struct ice_aqc_lldp_set_mib_change lldp_set_event;
+ struct ice_aqc_lldp_stop lldp_stop;
+ struct ice_aqc_lldp_start lldp_start;
+ struct ice_aqc_lldp_set_local_mib lldp_set_mib;
+ struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1421,6 +1578,8 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* Success */
+ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ ICE_AQ_RC_ENOENT = 2, /* No such element */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
@@ -1473,6 +1632,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_query_sched_res = 0x0412,
@@ -1490,6 +1650,14 @@ enum ice_adminq_opc {
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
ice_mbx_opc_send_msg_to_vf = 0x0802,
+ /* LLDP commands */
+ ice_aqc_opc_lldp_get_mib = 0x0A00,
+ ice_aqc_opc_lldp_set_mib_change = 0x0A01,
+ ice_aqc_opc_lldp_stop = 0x0A05,
+ ice_aqc_opc_lldp_start = 0x0A06,
+ ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ ice_aqc_opc_lldp_set_local_mib = 0x0A08,
+ ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@@ -1497,7 +1665,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
- /* TX queue handling commands/events */
+ /* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 63f003441300..da7878529929 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -31,7 +31,7 @@
* @hw: pointer to the HW structure
*
* This function sets the MAC type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
+ * vendor ID and device ID stored in the HW structure.
*/
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
@@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
/**
* ice_aq_manage_mac_read - manage MAC address read command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the manage MAC read response
* @buf_size: Size of the virtual buffer
* @cd: pointer to command details structure or NULL
@@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-static enum ice_status
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return status;
+ return 0;
}
/**
@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
*/
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_FIN, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+ ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+ ICE_FLG_FIN, idx++);
/* flex flag 1 is not used for flexi-flag programming, skipping
* these four FLG64 bits.
*/
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
- ICE_RXFLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
- ICE_RXFLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+ ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+ ICE_FLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+ ICE_FLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
break;
default:
@@ -418,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -438,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -477,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
*
* This function enables/disables the FW logging via Rx CQ events and a UART
@@ -626,7 +626,7 @@ out:
/**
* ice_output_fw_log
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: pointer to the AQ message descriptor
* @buf: pointer to the buffer accompanying the AQ message
*
@@ -642,12 +642,12 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Determines the itr/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
-static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
@@ -664,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
hw->itr_gran = ICE_ITR_GRAN_MAX_25;
hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
break;
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to determine itr/intrl granularity\n");
- return ICE_ERR_CFG;
}
-
- return 0;
}
/**
@@ -697,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- status = ice_get_itr_intrl_gran(hw);
- if (status)
- return status;
+ ice_get_itr_intrl_gran(hw);
status = ice_init_all_ctrlq(hw);
if (status)
@@ -731,7 +723,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
- /* set the back pointer to hw */
+ /* set the back pointer to HW */
hw->port_info->hw = hw;
/* Initialize port_info struct with switch configuration data */
@@ -988,7 +980,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
- * Copies rxq context from dense structure to hw register space
+ * Copies rxq context from dense structure to HW register space
*/
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
@@ -1001,7 +993,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM;
- /* Copy each dword separately to hw */
+ /* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
wr32(hw, QRX_CONTEXT(i, rxq_index),
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
@@ -1045,7 +1037,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to hw register space
+ * it to HW register space
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1100,8 +1092,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Dumps debug log about control command with descriptor contents.
*/
-void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
- void *buf, u16 buf_len)
+void
+ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
+ u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
@@ -1143,7 +1136,7 @@ void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: descriptor describing the command
* @buf: buffer to use for indirect commands (NULL for direct commands)
* @buf_size: size of buffer for indirect commands (0 for direct commands)
@@ -1160,7 +1153,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/**
* ice_aq_get_fw_ver
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cd: pointer to command details structure or NULL
*
* Get the firmware version (0x0001) from the admin queue commands
@@ -1194,7 +1187,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
/**
* ice_aq_q_shutdown
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
@@ -1217,8 +1210,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
/**
* ice_aq_req_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @access: access type
* @sdp_number: resource number
* @timeout: the maximum time in ms that the driver may hold the resource
@@ -1303,8 +1296,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/**
* ice_aq_release_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @sdp_number: resource number
* @cd: pointer to command details structure or NULL
*
@@ -1330,7 +1323,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
/**
* ice_acquire_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
* @access: access type (read or write)
* @timeout: timeout in milliseconds
*
@@ -1392,7 +1385,7 @@ ice_acquire_res_exit:
/**
* ice_release_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
*
* This function will release a resource using the proper Admin Command.
*/
@@ -1404,7 +1397,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
- * results in an admin Q timeout, so handle them correctly
+ * results in an admin queue timeout, so handle them correctly
*/
while ((status == ICE_ERR_AQ_TIMEOUT) &&
(total_delay < hw->adminq.sq_cmd_timeout)) {
@@ -1415,13 +1408,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
- * ice_get_guar_num_vsi - determine number of guar VSI for a PF
- * @hw: pointer to the hw structure
+ * ice_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
- * from parsing capabilities and use this to calculate the number of VSI per PF.
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
*/
-static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
u8 funcs;
@@ -1432,12 +1427,12 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
if (!funcs)
return 0;
- return ICE_MAX_VSI / funcs;
+ return max / funcs;
}
/**
* ice_parse_caps - parse function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: pointer to a buffer containing function/device capability records
* @cap_count: number of capability records in the list
* @opc: type of capabilities list to parse
@@ -1512,7 +1507,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
number);
@@ -1578,7 +1574,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
/**
* ice_aq_discover_caps - query function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
* @cap_count: cap count needed if AQ err==ENOMEM
@@ -1617,8 +1613,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* @hw: pointer to the hardware structure
* @opc: capabilities type to discover - pass in the command opcode
*/
-static enum ice_status ice_discover_caps(struct ice_hw *hw,
- enum ice_adminq_opc opc)
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
enum ice_status status;
u32 cap_count;
@@ -1677,7 +1673,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
/**
* ice_aq_manage_mac_write - manage MAC address write command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
* @flags: flags to control write behavior
* @cd: pointer to command details structure or NULL
@@ -1705,7 +1701,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
/**
* ice_aq_clear_pxe_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
@@ -1721,7 +1717,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_clear_pxe_mode - clear pxe operations mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Make sure all PXE mode settings are cleared, including things
* like descriptor fetch/write-back mode.
@@ -1737,10 +1733,10 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
*
- * This helper function will convert an entry in phy type structure
+ * This helper function will convert an entry in PHY type structure
* [phy_type_low, phy_type_high] to its corresponding link speed.
* Note: In the structure of [phy_type_low, phy_type_high], there should
- * be one bit set, as this function will convert one phy type to its
+ * be one bit set, as this function will convert one PHY type to its
* speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
@@ -1884,10 +1880,10 @@ void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap)
{
- u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
u64 pt_high;
u64 pt_low;
int index;
+ u16 speed;
/* We first check with low part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
@@ -1910,7 +1906,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
/**
* ice_aq_set_phy_cfg
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @lport: logical port number
* @cfg: structure with PHY configuration data to be set
* @cd: pointer to command details structure or NULL
@@ -1929,6 +1925,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
if (!cfg)
return ICE_ERR_PARAM;
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
+ ice_debug(hw, ICE_DBG_PHY,
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ cfg->caps);
+
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
+ }
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2016,7 +2021,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- /* Get the current phy config */
+ /* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status) {
@@ -2027,8 +2032,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* clear the old pause settings */
cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
/* set the new capabilities */
cfg.caps |= pause_mask;
+
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
int retry_count, retry_max = 10;
@@ -2136,6 +2143,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
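A caller sketch for the new ice_aq_set_event_mask() helper; the wrapper function and the zero mask value are illustrative assumptions, not part of this patch:

/* Hypothetical caller: program the link event mask for the port backing
 * this PF. The mask bit layout is defined by the Set Event Mask (0x0613)
 * command; 0 is used here purely as a placeholder value.
 */
static enum ice_status example_set_link_event_mask(struct ice_pf *pf)
{
	u16 mask = 0;	/* placeholder: real callers pick which events to mask */

	return ice_aq_set_event_mask(&pf->hw, pf->hw.port_info->lport,
				     mask, NULL);
}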
@@ -2297,7 +2330,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
/**
* __ice_aq_get_set_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_id: VSI FW index
* @key: pointer to key info struct
* @set: set true to set the key, false to get the key
@@ -2332,7 +2365,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
@@ -2351,7 +2384,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_aq_set_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
@@ -2436,7 +2469,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
- * @rst_src: if called due to reset, specifies the RST source
+ * @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
@@ -2476,7 +2509,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
- /* In this case, FW expects vmvf_num to be absolute VF id */
+ /* In this case, FW expects vmvf_num to be absolute VF ID */
cmd->vmvf_and_timeout |=
cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
ICE_AQC_Q_DIS_VMVF_NUM_M);
@@ -2534,8 +2567,8 @@ do_aq:
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -2573,8 +2606,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -2616,8 +2649,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -2667,8 +2700,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -2750,24 +2783,50 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ */
+static struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ struct ice_q_ctx *q_ctx;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return NULL;
+ if (q_handle >= vsi->num_lan_q_entries[tc])
+ return NULL;
+ if (!vsi->lan_q_ctx[tc])
+ return NULL;
+ q_ctx = vsi->lan_q_ctx[tc];
+ return &q_ctx[q_handle];
+}
+
+/**
* ice_ena_vsi_txq
* @pi: port information structure
* @vsi_handle: software VSI handle
- * @tc: tc number
+ * @tc: TC number
+ * @q_handle: software queue handle
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
* @buf_size: size of buffer for indirect command
* @cd: pointer to command details structure or NULL
*
- * This function adds one lan q
+ * This function adds one LAN queue
*/
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
+ struct ice_q_ctx *q_ctx;
enum ice_status status;
struct ice_hw *hw;
@@ -2784,6 +2843,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
mutex_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
+ if (!q_ctx) {
+ ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
+ q_handle);
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
/* find a parent node */
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
@@ -2803,14 +2870,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
* Bit 5-6.
* - Bit 7 is reserved.
* Without setting the generic section as valid in valid_sections, the
- * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
+ * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
- /* add the lan q */
+ /* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
le16_to_cpu(buf->txqs[0].txq_id),
hw->adminq.sq_last_status);
goto ena_txq_exit;
@@ -2818,8 +2885,9 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ q_ctx->q_handle = q_handle;
- /* add a leaf node into schduler tree q layer */
+ /* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
ena_txq_exit:
@@ -2830,35 +2898,43 @@ ena_txq_exit:
/**
* ice_dis_vsi_txq
* @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
* @num_queues: number of queues
+ * @q_handles: pointer to software queue handle array
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
- * @rst_src: if called due to reset, specifies the RST source
+ * @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handles, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
+ struct ice_q_ctx *q_ctx;
u16 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
- /* if queue is disabled already yet the disable queue command has to be
- * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
- * any queue information
- */
- if (!num_queues && rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
- NULL);
+ if (!num_queues) {
+ /* if queue is disabled already yet the disable queue command
+ * has to be sent to complete the VF reset, then call
+ * ice_aq_dis_lan_txq without any queue information
+ */
+ if (rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ vmvf_num, NULL);
+ return ICE_ERR_CFG;
+ }
mutex_lock(&pi->sched_lock);
@@ -2868,6 +2944,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ if (!q_ctx) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+ q_handles[i]);
+ continue;
+ }
+ if (q_ctx->q_handle != q_handles[i]) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ q_ctx->q_handle, q_handles[i]);
+ continue;
+ }
qg_list.parent_teid = node->info.parent_teid;
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
@@ -2878,18 +2965,19 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (status)
break;
ice_free_sched_node(pi, node);
+ q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
mutex_unlock(&pi->sched_lock);
return status;
}
/**
- * ice_cfg_vsi_qs - configure the new/exisiting VSI queues
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
*
* This function adds/updates the VSI queues per TC.
*/
@@ -2908,7 +2996,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
mutex_lock(&pi->sched_lock);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
/* configuration is possible only if TC node is present */
if (!ice_sched_get_tc_node(pi, i))
continue;
@@ -2924,13 +3012,13 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
- * ice_cfg_vsi_lan - configure VSI lan queues
+ * ice_cfg_vsi_lan - configure VSI LAN queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
- * @max_lanqs: max lan queues array per TC
+ * @max_lanqs: max LAN queues array per TC
*
- * This function adds/updates the VSI lan queues per TC.
+ * This function adds/updates the VSI LAN queues per TC.
*/
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
@@ -2942,7 +3030,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
/**
* ice_replay_pre_init - replay pre initialization
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
@@ -2966,7 +3054,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
/**
* ice_replay_vsi - replay VSI configuration
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: driver VSI handle
*
* Restore all VSI configuration after reset. It is required to call this
@@ -2993,7 +3081,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_replay_post - post replay configuration cleanup
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Post replay cleanup.
*/
@@ -3012,8 +3100,9 @@ void ice_replay_post(struct ice_hw *hw)
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
u64 new_data;
@@ -3043,8 +3132,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
u32 new_data;
@@ -3063,3 +3153,28 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
+
+/**
+ * ice_sched_query_elem - query element information from HW
+ * @hw: pointer to the HW struct
+ * @node_teid: node TEID to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ memset(buf, 0, buf_size);
+ buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
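A usage sketch for the new ice_sched_query_elem() helper; the wrapper function and the origin of node_teid are hypothetical:

/* Hypothetical caller: query one scheduler element by its TEID and dump
 * its element type on success.
 */
static void example_query_sched_node(struct ice_hw *hw, u32 node_teid)
{
	struct ice_aqc_get_elem buf;
	enum ice_status status;

	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (!status)
		ice_debug(hw, ICE_DBG_SCHED, "node %u elem_type %u\n",
			  node_teid, buf.generic[0].data.elem_type);
}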
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d7c7c2ed8823..f1ddebf45231 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -9,8 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
- u16 buf_len);
+void
+ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
- u16 *data);
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -89,25 +89,37 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
- struct ice_sq_cd *cmd_details);
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handle, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2bf5e11f559a..cc8cb5fdcdc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -51,7 +51,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
/**
* ice_check_sq_alive
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if Queue is enabled else false.
@@ -287,7 +287,7 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * Configure base address and length registers for the receive (event q)
+ * Configure base address and length registers for the receive (event queue)
*/
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
@@ -751,7 +751,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if the firmware has processed all descriptors on the
@@ -767,7 +767,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_send_cmd - send command to Control Queue (ATQ)
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buf: buffer to use for indirect commands (or NULL for direct commands)
@@ -962,7 +962,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
/**
* ice_clean_rq_elem
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 0038a4109c99..e0585394d984 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -79,6 +79,7 @@ struct ice_rq_event_info {
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
+ enum ice_aq_err rq_last_status; /* last status on receive queue */
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */
@@ -86,10 +87,9 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
+ enum ice_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
- enum ice_aq_err sq_last_status; /* last status on send queue */
- enum ice_aq_err rq_last_status; /* last status on receive queue */
};
#endif /* _ICE_CONTROLQ_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
new file mode 100644
index 000000000000..8bbf48e04a1c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_dcb.h"
+
+/**
+ * ice_aq_get_lldp_mib
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet). (0x0A00)
+ */
+static enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_get_mib *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_get_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
+
+ cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
+ cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M;
+
+ desc.datalen = cpu_to_le16(buf_size);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ if (local_len)
+ *local_len = le16_to_cpu(cmd->local_len);
+ if (remote_len)
+ *remote_len = le16_to_cpu(cmd->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_update: Enable or Disable event posting
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes (0x0A01)
+ */
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_mib_change *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_event;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
+
+ if (!ena_update)
+ cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_stop_lldp
+ * @hw: pointer to the HW struct
+ * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
+ * False if LLDP Agent needs to be Stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent (0x0A05)
+ */
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_stop;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
+
+ if (shutdown_lldp_agent)
+ cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_start_lldp
+ * @hw: pointer to the HW struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports. (0x0A06)
+ */
+enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_start *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_start;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
+
+ cmd->command = ICE_AQ_LLDP_AGENT_START;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+static enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_get_dcbx_status
+ * @hw: pointer to the HW struct
+ *
+ * Get the DCBX status from the Firmware
+ */
+u8 ice_get_dcbx_status(struct ice_hw *hw)
+{
+ u32 reg;
+
+ reg = rd32(hw, PRTDCB_GENS);
+ return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
+ PRTDCB_GENS_DCBX_STATUS_S);
+}
+
+/**
+ * ice_parse_ieee_ets_common_tlv
+ * @buf: Data buffer to be parsed for ETS CFG/REC data
+ * @ets_cfg: Container to store parsed data
+ *
+ * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV
+ */
+static void
+ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ ets_cfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
+ ICE_IEEE_ETS_PRIO_1_S);
+ ets_cfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
+ ICE_IEEE_ETS_PRIO_0_S);
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16|
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ ets_cfg->tcbwtable[i] = buf[offset];
+ ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++];
+ }
+}
+
+/**
+ * ice_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
+ ICE_IEEE_ETS_WILLING_S);
+ etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
+ etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
+ ICE_IEEE_ETS_MAXTC_S);
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec);
+}
+
+/**
+ * ice_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
+ ICE_IEEE_PFC_WILLING_S);
+ dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
+ dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
+ ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.pfcena = buf[1];
+}
+
+/**
+ * ice_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ */
+static void
+ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 offset = 0;
+ u16 typelen;
+ int i = 0;
+ u16 len;
+ u8 *buf;
+
+ typelen = ntohs(tlv->typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ buf = tlv->tlvinfo;
+
+ /* Removing sizeof(ouisubtype) and reserved byte from len.
+ * Remaining len div 3 is number of APP TLVs.
+ */
+ len -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < len) {
+ dcbcfg->app[i].priority = ((buf[offset] &
+ ICE_IEEE_APP_PRIO_M) >>
+ ICE_IEEE_APP_PRIO_S);
+ dcbcfg->app[i].selector = ((buf[offset] &
+ ICE_IEEE_APP_SEL_M) >>
+ ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * ice_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ switch (subtype) {
+ case ICE_IEEE_SUBTYPE_ETS_CFG:
+ ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_ETS_REC:
+ ice_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_PFC_CFG:
+ ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_APP_PRI:
+ ice_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ */
+static void
+ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ etscfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ etscfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * ice_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ */
+static void
+ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcena = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * ice_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ */
+static void
+ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, typelen, offset = 0;
+ struct ice_cee_app_prio *app;
+ u8 i;
+
+ typelen = ntohs(tlv->hdr.typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+
+ dcbcfg->numapps = len / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > ICE_DCBX_MAX_APPS)
+ dcbcfg->numapps = ICE_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset);
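+ /* pick the lowest-numbered user priority set in the CEE priority bitmap */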
+ for (up = 0; up < ICE_MAX_USER_PRIORITY; up++)
+ if (app->prio_map & BIT(up))
+ break;
+
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M);
+ switch (selector) {
+ case ICE_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE;
+ break;
+ case ICE_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].prot_id = ntohs(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * ice_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u16 len, tlvlen, typelen;
+ u32 ouisubtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ /* Return if not CEE DCBX */
+ if (subtype != ICE_CEE_DCBX_TYPE)
+ return;
+
+ typelen = ntohs(tlv->typelen);
+ tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
+ sizeof(struct ice_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) {
+ u16 sublen;
+
+ typelen = ntohs(sub_tlv->hdr.typelen);
+ sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
+ ICE_LLDP_TLV_TYPE_S);
+ switch (subtype) {
+ case ICE_CEE_SUBTYPE_PG_CFG:
+ ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_PFC_CFG:
+ ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_APP_PRI:
+ ice_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct ice_cee_feat_tlv *)
+ ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * ice_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported; all other
+ * organizationally specific TLVs are ignored
+ */
+static void
+ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ switch (oui) {
+ case ICE_IEEE_8021QAZ_OUI:
+ ice_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case ICE_CEE_DCBX_OUI:
+ ice_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_lldp_to_dcb_cfg
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ */
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_lldp_org_tlv *tlv;
+ enum ice_status ret = 0;
+ u16 offset = 0;
+ u16 typelen;
+ u16 type;
+ u16 len;
+
+ if (!lldpmib || !dcbcfg)
+ return ICE_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelen = ntohs(tlv->typelen);
+ type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ offset += sizeof(typelen) + len;
+
+ /* END TLV or beyond LLDPDU size */
+ if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE)
+ break;
+
+ switch (type) {
+ case ICE_TLV_TYPE_ORG:
+ ice_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the firmware
+ */
+static enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ enum ice_status ret;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
+ ICE_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (!ret)
+ /* Parse LLDP MIB to get DCB configuration */
+ ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
+
+ devm_kfree(ice_hw_to_dev(hw), lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the HW struct
+ * @start_dcbx_agent: True if DCBx Agent needs to be started
+ * False if DCBx Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBx agent status
+ * True if DCBx Agent is active
+ * False if DCBx Agent is stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded DCBX Agent. Even when this wrapper returns
+ * success, the caller must check dcbx_agent_status to confirm that the
+ * firmware reports the requested agent state, and react accordingly. (0x0A09)
+ */
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop_start_specific_agent *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+ u16 opcode;
+
+ cmd = &desc.params.lldp_agent_ctrl;
+
+ opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+ if (start_dcbx_agent)
+ cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ *dcbx_agent_status = false;
+
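+ /* FW echoes the START_DCBX command value back when the agent is active */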
+ if (!status &&
+ cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
+ *dcbx_agent_status = true;
+
+ return status;
+}
+
+/**
+ * ice_aq_get_cee_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware (0x0A07)
+ */
+static enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd);
+}
+
+/**
+ * ice_cee_to_dcb_cfg
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ */
+static void
+ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ u16 ice_app_prot_id_type;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ dcbcfg->etscfg.prio_table[i * 2] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ dcbcfg->etscfg.prio_table[i * 2 + 1] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ }
+
+ ice_for_each_traffic_class(i) {
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+
+ app_index = 0;
+ for (i = 0; i < 3; i++) {
+ if (i == 0) {
+ /* FCoE APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE;
+ } else if (i == 1) {
+ /* iSCSI APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
+ ice_app_sel_type = ICE_APP_SEL_TCPIP;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+ } else {
+ /* FIP APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FIP;
+ }
+
+ status = (tlv_status & ice_aqc_cee_status_mask) >>
+ ice_aqc_cee_status_shift;
+ err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & ICE_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE/iSCSI/FIP APP if Error is False and
+ * Oper/Sync is True
+ */
+ if (!err && sync && oper) {
+ dcbcfg->app[app_index].priority =
+ (app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift;
+ dcbcfg->app[app_index].selector = ice_app_sel_type;
+ dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
+ app_index++;
+ }
+ }
+
+ dcbcfg->numapps = app_index;
+}
+
+/**
+ * ice_get_ieee_or_cee_dcb_cfg
+ * @pi: port information structure
+ * @dcbx_mode: mode of DCBX (IEEE or CEE)
+ *
+ * Get IEEE or CEE mode DCB configuration from the Firmware
+ */
+static enum ice_status
+ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = NULL;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ if (dcbx_mode == ICE_DCBX_MODE_IEEE)
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ else if (dcbx_mode == ICE_DCBX_MODE_CEE)
+ dcbx_cfg = &pi->desired_dcbx_cfg;
+
+ /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
+ * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
+ */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ dcbx_cfg = &pi->remote_dcbx_cfg;
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * ice_get_dcb_cfg
+ * @pi: port information structure
+ *
+ * Get DCB configuration from the Firmware
+ */
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct ice_dcbx_cfg *dcbx_cfg;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
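+ /* try the CEE DCBX query first; fall back to IEEE if FW reports ENOENT */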
+ ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
+ if (!ret) {
+ /* CEE mode */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
+ ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ /* CEE mode not enabled; try querying IEEE data */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_init_dcb
+ * @hw: pointer to the HW struct
+ *
+ * Update DCB configuration from the Firmware
+ */
+enum ice_status ice_init_dcb(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret = 0;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
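+ /* assume LLDP is handled in SW until the FW LLDP agent responds below */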
+ pi->is_sw_lldp = true;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) {
+ /* Get current DCBX configuration */
+ ret = ice_get_dcb_cfg(pi);
+ pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
+ if (ret)
+ return ret;
+ } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ return ICE_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
+ if (!ret)
+ pi->is_sw_lldp = false;
+
+ return ret;
+}
+
+/**
+ * ice_add_ieee_ets_common_tlv
+ * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
+ * @ets_cfg: Container for ice_dcb_ets_cfg data
+ *
+ * Populate the TLV buffer with ice_dcb_ets_cfg data
+ */
+static void
+ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 priority0, priority1;
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ priority0 = ets_cfg->prio_table[i * 2] & 0xF;
+ priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ buf[offset] = ets_cfg->tcbwtable[i];
+ buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u8 maxtcwilling = 0;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S);
+ maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+ buf[0] = maxtcwilling;
+
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etsrec;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+
+ /* First Octet is reserved */
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etsrec);
+}
+
+/**
+ * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(ICE_IEEE_PFC_WILLING_S);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(ICE_IEEE_PFC_MBC_S);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
+}
+
+/**
+ * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store which holds the APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ */
+static void
+ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 typelen, len, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+ /* len includes size of ouisubtype + 1 reserved + 3*numapps */
+ len = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF));
+ tlv->typelen = htons(typelen);
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: Fill TLV data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: Type of IEEE TLV
+ *
+ * Add tlv information
+ */
+static void
+ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the buffer that receives the LLDP MIB
+ * @miblen: length of LLDP MIB
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to MIB format
+ */
+static void
+ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, offset = 0, tlvid = ICE_TLV_ID_START;
+ struct ice_lldp_org_tlv *tlv;
+ u16 typelen;
+
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelen = ntohs(tlv->typelen);
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
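+ /* account for the 2-byte TLV type/length header when advancing offset */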
+ if (len)
+ offset += len + 2;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU ||
+ offset > ICE_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (len)
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+ *miblen = offset;
+}
+
+/**
+ * ice_set_dcb_cfg - Set the local LLDP MIB to FW
+ * @pi: port information structure
+ *
+ * Set DCB configuration to the Firmware
+ */
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+{
+ u8 mib_type, *lldpmib = NULL;
+ struct ice_dcbx_cfg *dcbcfg;
+ enum ice_status ret;
+ struct ice_hw *hw;
+ u16 miblen;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* update the HW local config */
+ dcbcfg = &pi->local_dcbx_cfg;
+ /* Allocate the LLDPDU */
+ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
+
+ ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
+ NULL);
+
+ devm_kfree(ice_hw_to_dev(hw), lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_query_port_ets - query port ets configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * query current port ets configuration
+ */
+static enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_query_port_ets *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ cmd = &desc.params.port_ets;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
+ cmd->port_teid = pi->root->info.node_teid;
+
+ status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
+ return status;
+}
+
+/**
+ * ice_update_port_tc_tree_cfg - update TC tree configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ *
+ * update the SW DB with the new TC changes
+ */
+static enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_aqc_get_elem elem;
+ enum ice_status status = 0;
+ u32 teid1, teid2;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ /* suspend the missing TC nodes */
+ for (i = 0; i < pi->root->num_children; i++) {
+ teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
+ ice_for_each_traffic_class(j) {
+ teid2 = le32_to_cpu(buf->tc_node_teid[j]);
+ if (teid1 == teid2)
+ break;
+ }
+ if (j < ICE_MAX_TRAFFIC_CLASS)
+ continue;
+ /* TC is missing */
+ pi->root->children[i]->in_use = false;
+ }
+ /* add the new TC nodes */
+ ice_for_each_traffic_class(j) {
+ teid2 = le32_to_cpu(buf->tc_node_teid[j]);
+ if (teid2 == ICE_INVAL_TEID)
+ continue;
+ /* Is it already present in the tree ? */
+ for (i = 0; i < pi->root->num_children; i++) {
+ tc_node = pi->root->children[i];
+ if (!tc_node)
+ continue;
+ teid1 = le32_to_cpu(tc_node->info.node_teid);
+ if (teid1 == teid2) {
+ tc_node->tc_num = j;
+ tc_node->in_use = true;
+ break;
+ }
+ }
+ if (i < pi->root->num_children)
+ continue;
+ /* new TC */
+ status = ice_sched_query_elem(pi->hw, teid2, &elem);
+ if (!status)
+ status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ if (status)
+ break;
+ /* update the TC number */
+ node = ice_sched_find_node_by_teid(pi->root, teid2);
+ if (node)
+ node->tc_num = j;
+ }
+ return status;
+}
+
+/**
+ * ice_query_port_ets - query port ets configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * query current port ets configuration and update the
+ * SW DB with the TC changes
+ */
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
+ if (!status)
+ status = ice_update_port_tc_tree_cfg(pi, buf);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
new file mode 100644
index 000000000000..e7d4416e3a66
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_H_
+#define _ICE_DCB_H_
+
+#include "ice_type.h"
+
+#define ICE_DCBX_STATUS_NOT_STARTED 0
+#define ICE_DCBX_STATUS_IN_PROGRESS 1
+#define ICE_DCBX_STATUS_DONE 2
+#define ICE_DCBX_STATUS_DIS 7
+
+#define ICE_TLV_TYPE_END 0
+#define ICE_TLV_TYPE_ORG 127
+
+#define ICE_IEEE_8021QAZ_OUI 0x0080C2
+#define ICE_IEEE_SUBTYPE_ETS_CFG 9
+#define ICE_IEEE_SUBTYPE_ETS_REC 10
+#define ICE_IEEE_SUBTYPE_PFC_CFG 11
+#define ICE_IEEE_SUBTYPE_APP_PRI 12
+
+#define ICE_CEE_DCBX_OUI 0x001B21
+#define ICE_CEE_DCBX_TYPE 2
+#define ICE_CEE_SUBTYPE_PG_CFG 2
+#define ICE_CEE_SUBTYPE_PFC_CFG 3
+#define ICE_CEE_SUBTYPE_APP_PRI 4
+#define ICE_CEE_MAX_FEAT_TYPE 3
+/* Defines for LLDP TLV header */
+#define ICE_LLDP_TLV_LEN_S 0
+#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
+#define ICE_LLDP_TLV_TYPE_S 9
+#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
+#define ICE_LLDP_TLV_SUBTYPE_S 0
+#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
+#define ICE_LLDP_TLV_OUI_S 8
+#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
+
+/* Defines for IEEE ETS TLV */
+#define ICE_IEEE_ETS_MAXTC_S 0
+#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
+#define ICE_IEEE_ETS_CBS_S 6
+#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
+#define ICE_IEEE_ETS_WILLING_S 7
+#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
+#define ICE_IEEE_ETS_PRIO_0_S 0
+#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
+#define ICE_IEEE_ETS_PRIO_1_S 4
+#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
+#define ICE_CEE_PGID_PRIO_0_S 0
+#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
+#define ICE_CEE_PGID_PRIO_1_S 4
+#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
+#define ICE_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define ICE_IEEE_TSA_STRICT 0
+#define ICE_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define ICE_IEEE_PFC_CAP_S 0
+#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
+#define ICE_IEEE_PFC_MBC_S 6
+#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
+#define ICE_IEEE_PFC_WILLING_S 7
+#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
+
+/* Defines for IEEE APP TLV */
+#define ICE_IEEE_APP_SEL_S 0
+#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
+#define ICE_IEEE_APP_PRIO_S 5
+#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
+
+/* TLV definitions for preparing MIB */
+#define ICE_IEEE_TLV_ID_ETS_CFG 3
+#define ICE_IEEE_TLV_ID_ETS_REC 4
+#define ICE_IEEE_TLV_ID_PFC_CFG 5
+#define ICE_IEEE_TLV_ID_APP_PRI 6
+#define ICE_TLV_ID_END_OF_LLDPPDU 7
+#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+
+#define ICE_IEEE_ETS_TLV_LEN 25
+#define ICE_IEEE_PFC_TLV_LEN 6
+#define ICE_IEEE_APP_TLV_LEN 11
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct ice_lldp_org_tlv {
+ __be16 typelen;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+} __packed;
+
+struct ice_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct ice_cee_ctrl_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct ice_cee_feat_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define ICE_CEE_FEAT_TLV_ENA_M 0x80
+#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
+#define ICE_CEE_FEAT_TLV_ERR_M 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct ice_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define ICE_CEE_APP_SELECTOR_M 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+} __packed;
+
+u8 ice_get_dcbx_status(struct ice_hw *hw);
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_init_dcb(struct ice_hw *hw);
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cmd_details);
+#ifdef CONFIG_DCB
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ struct ice_sq_cd *cd);
+enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd);
+#else /* CONFIG_DCB */
+static inline enum ice_status
+ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
+ bool __always_unused shutdown_lldp_agent,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_start_lldp(struct ice_hw __always_unused *hw,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
+ bool __always_unused start_dcbx_agent,
+ bool *dcbx_agent_status,
+ struct ice_sq_cd __always_unused *cd)
+{
+ *dcbx_agent_status = false;
+
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
+ bool __always_unused ena_update,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DCB */
+#endif /* _ICE_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
new file mode 100644
index 000000000000..3e81af1884fc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_dcb_lib.h"
+
+/**
+ * ice_dcb_get_ena_tc - return bitmap of enabled TCs
+ * @dcbcfg: DCB config to evaluate for enabled TCs
+ */
+u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 i, num_tc, ena_tc = 1;
+
+ num_tc = ice_dcb_get_num_tc(dcbcfg);
+
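+ /* build a bitmap with one bit set per enabled TC; TC0 is always enabled */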
+ for (i = 0; i < num_tc; i++)
+ ena_tc |= BIT(i);
+
+ return ena_tc;
+}
+
+/**
+ * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
+ * @dcbcfg: config to retrieve number of TCs from
+ */
+u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
+{
+ bool tc_unused = false;
+ u8 num_tc = 0;
+ u8 ret = 0;
+ int i;
+
+ /* Scan the ETS Config Priority Table to find traffic classes
+ * enabled and create a bitmask of enabled TCs
+ */
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
+
+ /* Scan bitmask for contiguous TCs starting with TC0 */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TCs - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = true;
+ }
+ }
+
+ /* There is always at least 1 TC */
+ if (!ret)
+ ret = 1;
+
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: VSI owner of rings being updated
+ */
+void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
+{
+ struct ice_ring *tx_ring, *rx_ring;
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_txq; i++) {
+ tx_ring = vsi->tx_rings[i];
+ tx_ring->dcb_tc = 0;
+ }
+ for (i = 0; i < vsi->num_rxq; i++) {
+ rx_ring = vsi->rx_rings[i];
+ rx_ring->dcb_tc = 0;
+ }
+ return;
+ }
+
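+ /* assign each TC's contiguous block of Tx/Rx rings to that TC */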
+ ice_for_each_traffic_class(n) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(n)))
+ break;
+
+ qoffset = vsi->tc_cfg.tc_info[n].qoffset;
+ qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ tx_ring = vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring->dcb_tc = n;
+ rx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
+ * @pf: pointer to the PF struct
+ *
+ * The caller is assumed to have already disabled all VSIs before
+ * calling this function. DCB is reconfigured based on
+ * local_dcbx_cfg.
+ */
+static void ice_pf_dcb_recfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ u8 tc_map = 0;
+ int v, ret;
+
+ /* Update each VSI */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ if (pf->vsi[v]->type == ICE_VSI_PF)
+ tc_map = ice_dcb_get_ena_tc(dcbcfg);
+ else
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+
+ ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to config TC for VSI index: %d\n",
+ pf->vsi[v]->idx);
+ else
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ }
+}
+
+/**
+ * ice_pf_dcb_cfg - Apply new DCB configuration
+ * @pf: pointer to the PF struct
+ * @new_cfg: DCBX config to apply
+ */
+static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+{
+ struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ int ret = 0;
+
+ curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (ice_dcb_get_num_tc(new_cfg) > 1) {
+ dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ }
+
+ if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+ return ret;
+ }
+
+ /* Store old config in case FW config fails */
+ old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
+ if (!old_cfg)
+ return -ENOMEM;
+ memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+
+ /* avoid race conditions by holding the lock while disabling and
+ * re-enabling the VSI
+ */
+ rtnl_lock();
+ ice_pf_dis_all_vsi(pf, true);
+
+ memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
+ memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+
+ /* Only send new config to HW if we are in SW LLDP mode. Otherwise,
+ * the new config came from the HW in the first place.
+ */
+ if (pf->hw.port_info->is_sw_lldp) {
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+ /* Restore previous settings to local config */
+ memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
+ goto out;
+ }
+ }
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto out;
+ }
+
+ ice_pf_dcb_recfg(pf);
+
+out:
+ ice_pf_ena_all_vsi(pf, true);
+ rtnl_unlock();
+ devm_kfree(&pf->pdev->dev, old_cfg);
+ return ret;
+}
+
+/**
+ * ice_dcb_rebuild - rebuild DCB post reset
+ * @pf: physical function instance
+ */
+void ice_dcb_rebuild(struct ice_pf *pf)
+{
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_dcbx_cfg *prev_cfg;
+ enum ice_status ret;
+ u8 willing;
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto dcb_error;
+ }
+
+ /* If DCB was not enabled previously, we are done */
+ if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return;
+
+ /* Save current willing state and force FW to unwilling */
+ willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
+ pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+ goto dcb_error;
+ }
+
+ /* Retrieve DCB config and ensure same as current in SW */
+ prev_cfg = devm_kmemdup(&pf->pdev->dev,
+ &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg) {
+ dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+ goto dcb_error;
+ }
+
+ ice_init_dcb(&pf->hw);
+ if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(*prev_cfg))) {
+ /* difference in cfg detected - disable DCB till next MIB */
+ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+ goto dcb_error;
+ }
+
+ /* fetched config congruent to previous configuration */
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+
+ /* Configuration replayed - reset willing state to previous */
+ pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
+ goto dcb_error;
+ }
+ dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto dcb_error;
+ }
+
+ return;
+
+dcb_error:
+ dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
+ prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg)
+ return;
+ prev_cfg->etscfg.willing = true;
+ prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+ prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ ice_pf_dcb_cfg(pf, prev_cfg);
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+}
+
+/**
+ * ice_dcb_init_cfg - set the initial DCB config in SW
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_init_cfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *newcfg;
+ struct ice_port_info *pi;
+ int ret = 0;
+
+ pi = pf->hw.port_info;
+ newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
+ if (!newcfg)
+ return -ENOMEM;
+
+ memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
+ memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+
+ dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+ if (ice_pf_dcb_cfg(pf, newcfg))
+ ret = -EINVAL;
+
+ devm_kfree(&pf->pdev->dev, newcfg);
+
+ return ret;
+}
+
+/**
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+{
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_dcbx_cfg *dcbcfg;
+ struct ice_port_info *pi;
+ struct ice_hw *hw;
+ int ret;
+
+ hw = &pf->hw;
+ pi = hw->port_info;
+ dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return -ENOMEM;
+
+ memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.maxtcs = 8;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+
+ memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
+ sizeof(dcbcfg->etsrec));
+ dcbcfg->etsrec.willing = 0;
+
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;
+
+ dcbcfg->numapps = 1;
+ dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].priority = 3;
+ dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
+
+ ret = ice_pf_dcb_cfg(pf, dcbcfg);
+ devm_kfree(&pf->pdev->dev, dcbcfg);
+ if (ret)
+ return ret;
+
+ return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
+}
+
+/**
+ * ice_init_pf_dcb - initialize DCB for a PF
+ * @pf: PF to initialize DCB for
+ */
+int ice_init_pf_dcb(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ struct ice_port_info *port_info;
+ struct ice_hw *hw = &pf->hw;
+ int sw_default = 0;
+ int err;
+
+ port_info = hw->port_info;
+
+ /* check if device is DCB capable */
+ if (!hw->func_caps.common_cap.dcb) {
+ dev_dbg(dev, "DCB not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Best effort to put DCBx and LLDP into a good state */
+ port_info->dcbx_status = ice_get_dcbx_status(hw);
+ if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
+ port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
+ bool dcbx_status;
+
+ /* Attempt to start LLDP engine. Ignore errors
+ * as this will error if it is already started
+ */
+ ice_aq_start_lldp(hw, NULL);
+
+ /* Attempt to start DCBX. Ignore errors as this
+ * will error if it is already started
+ */
+ ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
+ }
+
+ err = ice_init_dcb(hw);
+ if (err) {
+ /* FW LLDP not in usable state, default to SW DCBx/LLDP */
+ dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
+ hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
+ hw->port_info->is_sw_lldp = true;
+ }
+
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
+ dev_info(&pf->pdev->dev, "DCBX disabled\n");
+
+ /* LLDP disabled in FW */
+ if (port_info->is_sw_lldp) {
+ sw_default = 1;
+ dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
+ }
+
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ sw_default = 1;
+ dev_info(&pf->pdev->dev, "DCBX not started\n");
+ }
+
+ if (sw_default) {
+ err = ice_dcb_sw_dflt_cfg(pf);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set local DCB config %d\n", err);
+ err = -EIO;
+ goto dcb_init_err;
+ }
+
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ return 0;
+ }
+
+ /* DCBX in FW and LLDP enabled in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+
+ set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+
+ err = ice_dcb_init_cfg(pf);
+ if (err)
+ goto dcb_init_err;
+
+ dev_info(&pf->pdev->dev, "DCBX offload supported\n");
+ return err;
+
+dcb_init_err:
+ dev_err(dev, "DCB init failed\n");
+ return err;
+}
+
+/**
+ * ice_update_dcb_stats - Update DCB stats counters
+ * @pf: PF whose stats need to be updated
+ */
+void ice_update_dcb_stats(struct ice_pf *pf)
+{
+ struct ice_hw_port_stats *prev_ps, *cur_ps;
+ struct ice_hw *hw = &pf->hw;
+ u8 pf_id = hw->pf_id;
+ int i;
+
+ prev_ps = &pf->stats_prev;
+ cur_ps = &pf->stats;
+
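+ /* PFC XON/XOFF counters are kept per user priority (0-7) */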
+ for (i = 0; i < 8; i++) {
+ ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xoff_rx[i],
+ &cur_ps->priority_xoff_rx[i]);
+ ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_rx[i],
+ &cur_ps->priority_xon_rx[i]);
+ ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_tx[i],
+ &cur_ps->priority_xon_tx[i]);
+ ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xoff_tx[i],
+ &cur_ps->priority_xoff_tx[i]);
+ ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_2_xoff[i],
+ &cur_ps->priority_xon_2_xoff[i]);
+ }
+}
+
+/**
+ * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
+ * @tx_ring: ring to send buffer on
+ * @first: pointer to struct ice_tx_buf
+ */
+int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ struct ice_tx_buf *first)
+{
+ struct sk_buff *skb = first->skb;
+
+ if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
+ return 0;
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+ skb->priority != TC_PRIO_CONTROL) {
+ first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+ /* Mask the lower 3 bits to set the 802.1p priority */
+ first->tx_flags |= (skb->priority & 0x7) <<
+ ICE_TX_FLAGS_VLAN_PR_S;
+ if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(first->tx_flags >>
+ ICE_TX_FLAGS_VLAN_S);
+ } else {
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dcb_process_lldp_set_mib_change - Process MIB change
+ * @pf: ptr to ice_pf
+ * @event: pointer to the admin queue receive event
+ */
+void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ struct ice_rq_event_info *event)
+{
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
+ int err;
+
+ prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
+ sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return;
+
+ err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
+ if (!err)
+ ice_pf_dcb_cfg(pf, dcbcfg);
+
+ devm_kfree(&pf->pdev->dev, dcbcfg);
+
+ /* Get updated DCBx data from firmware */
+ err = ice_get_dcb_cfg(pf->hw.port_info);
+ if (err)
+ dev_err(&pf->pdev->dev,
+ "Failed to get DCB config\n");
+ } else {
+ dev_dbg(&pf->pdev->dev,
+ "MIB Change Event in HOST mode\n");
+ }
+}
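
The 802.1p handling in ice_tx_prepare_vlan_flags_dcb() above only masks skb->priority to 3 bits and shifts it into the driver's tx_flags; the resulting PCP/VID packing follows IEEE 802.1Q. Below is a minimal standalone sketch (not driver code) of that packing, assuming only the standard TCI layout (PCP in bits 15:13, VID in bits 11:0); the driver's own ICE_TX_FLAGS_VLAN_PR_S bit position is not shown in this patch and is not assumed here.

/* Illustrative sketch: build an 802.1Q TCI word from a 3-bit priority and a
 * 12-bit VLAN ID. Per IEEE 802.1Q the PCP occupies bits 15:13 of the TCI and
 * the VID occupies bits 11:0.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT	13	/* PCP field position in the TCI */
#define VLAN_VID_MASK	0x0fff	/* 12-bit VLAN ID */

static uint16_t build_vlan_tci(uint8_t skb_priority, uint16_t vid)
{
	/* Mask the priority to 3 bits, as the driver does with (prio & 0x7) */
	return (uint16_t)(((skb_priority & 0x7) << VLAN_PRIO_SHIFT) |
			  (vid & VLAN_VID_MASK));
}

int main(void)
{
	/* priority 5 on VLAN 100 -> TCI 0xa064 */
	printf("TCI = 0x%04x\n", build_vlan_tci(5, 100));
	return 0;
}
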
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
new file mode 100644
index 000000000000..ca7b76faa03c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_LIB_H_
+#define _ICE_DCB_LIB_H_
+
+#include "ice.h"
+#include "ice_lib.h"
+
+#ifdef CONFIG_DCB
+#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+
+void ice_dcb_rebuild(struct ice_pf *pf);
+u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
+int ice_init_pf_dcb(struct ice_pf *pf);
+void ice_update_dcb_stats(struct ice_pf *pf);
+int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ struct ice_tx_buf *first);
+void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+static inline void
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+{
+ tlan_ctx->cgd_num = ring->dcb_tc;
+}
+#else
+#define ice_dcb_rebuild(pf) do {} while (0)
+
+static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
+{
+ return ICE_DFLT_TRAFFIC_CLASS;
+}
+
+static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
+{
+ return 1;
+}
+
+static inline int ice_init_pf_dcb(struct ice_pf *pf)
+{
+ dev_dbg(&pf->pdev->dev, "DCB not supported\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ struct ice_tx_buf __always_unused *first)
+{
+ return 0;
+}
+
+#define ice_update_dcb_stats(pf) do {} while (0)
+#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
+#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
+#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
+#endif /* CONFIG_DCB */
+#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index eb8d149e317c..1341fde8d53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,8 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_lib.h"
+#include "ice_dcb_lib.h"
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -33,8 +35,14 @@ static int ice_q_stats_len(struct net_device *netdev)
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
-#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
- ice_q_stats_len(n))
+#define ICE_PFC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
+ / sizeof(u64))
+#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
+ ICE_VSI_STATS_LEN + ice_q_stats_len(n))
static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -126,6 +134,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
+ ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -309,6 +318,22 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx-priority-%u-xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx-priority-%u-xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx-priority-%u-xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx-priority-%u-xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -382,13 +407,19 @@ static u32 ice_get_priv_flags(struct net_device *netdev)
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ int ret = 0;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
+ set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
+
+ bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
const struct ice_priv_flag *priv_flag;
@@ -400,7 +431,79 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
clear_bit(priv_flag->bitno, pf->flags);
}
- return 0;
+ bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
+
+ if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
+ if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
+ enum ice_status status;
+
+ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
+ NULL);
+ /* If unregistering for LLDP events fails, this is
+ * not an error state, as there shouldn't be any
+ * events to respond to.
+ */
+ if (status)
+ dev_info(&pf->pdev->dev,
+ "Failed to unreg for LLDP events\n");
+
+ /* The AQ call to stop the FW LLDP agent will generate
+ * an error if the agent is already stopped.
+ */
+ status = ice_aq_stop_lldp(&pf->hw, true, NULL);
+ if (status)
+ dev_warn(&pf->pdev->dev,
+ "Fail to stop LLDP agent\n");
+ /* Use case for having the FW LLDP agent stopped
+ * will likely not need DCB, so failure to init is
+ * not a concern of ethtool
+ */
+ status = ice_init_pf_dcb(pf);
+ if (status)
+ dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
+ } else {
+ enum ice_status status;
+ bool dcbx_agent_status;
+
+ /* AQ command to start FW LLDP agent will return an
+ * error if the agent is already started
+ */
+ status = ice_aq_start_lldp(&pf->hw, NULL);
+ if (status)
+ dev_warn(&pf->pdev->dev,
+ "Fail to start LLDP Agent\n");
+
+ /* AQ command to start FW DCBx agent will fail if
+ * the agent is already started
+ */
+ status = ice_aq_start_stop_dcbx(&pf->hw, true,
+ &dcbx_agent_status,
+ NULL);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to start FW DCBX\n");
+
+ dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
+ dcbx_agent_status ? "ACTIVE" : "DISABLED");
+
+ /* Failure to configure MIB change or init DCB is not
+ * relevant to ethtool. Print notification that
+ * registration/init failed but do not return error
+ * state to ethtool
+ */
+ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
+ NULL);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Fail to reg for MIB change\n");
+
+ status = ice_init_pf_dcb(pf);
+ if (status)
+ dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+ }
+ }
+ clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
+ return ret;
}
static int ice_get_sset_count(struct net_device *netdev, int sset)
@@ -486,6 +589,16 @@ ice_get_ethtool_stats(struct net_device *netdev,
data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+
+ for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+
+ for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
}
/**
@@ -811,7 +924,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
link_info = &vsi->port_info->phy.link_info;
- /* Initialize supported and advertised settings based on phy settings */
+ /* Initialize supported and advertised settings based on PHY settings */
switch (link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -921,6 +1034,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
25000baseCR_Full);
break;
case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
break;
@@ -1137,10 +1251,10 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
*/
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
- struct net_device __always_unused *netdev)
+ struct net_device *netdev)
{
/* link is down and the driver needs to fall back on
- * supported phy types to figure out what info to display
+ * supported PHY types to figure out what info to display
*/
ice_phy_type_to_ethtool(netdev, ks);
@@ -1156,8 +1270,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
*
* Reports speed/duplex settings based on media_type
*/
-static int ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+static int
+ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
@@ -1349,7 +1464,7 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
} else {
/* If autoneg is currently enabled */
if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
- /* If autoneg is supported 10GBASE_T is the only phy
+ /* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
if (ethtool_link_ksettings_test_link_mode(ks,
@@ -1399,14 +1514,13 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!p)
return -EOPNOTSUPP;
- /* Check if this is lan vsi */
- for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) {
+ /* Check if this is LAN VSI */
+ ice_for_each_vsi(pf, idx)
if (pf->vsi[idx]->type == ICE_VSI_PF) {
if (np->vsi != pf->vsi[idx])
return -EOPNOTSUPP;
break;
}
- }
if (p->phy.media_type != ICE_MEDIA_BASET &&
p->phy.media_type != ICE_MEDIA_FIBER &&
@@ -1464,7 +1578,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!abilities)
return -ENOMEM;
- /* Get the current phy config */
+ /* Get the current PHY config */
status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
NULL);
if (status) {
@@ -1559,15 +1673,16 @@ done:
}
/**
- * ice_get_rxnfc - command to get RX flow classification rules
+ * ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
* @rule_locs: buffer to return Rx flow classification rules
*
* Returns Success if the command is supported.
*/
-static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static int
+ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -1821,18 +1936,21 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_vsi *vsi = np->vsi;
+ struct ice_dcbx_cfg *dcbx_cfg;
enum ice_status status;
/* Initialize pause params */
pause->rx_pause = 0;
pause->tx_pause = 0;
+ dcbx_cfg = &pi->local_dcbx_cfg;
+
pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
return;
- /* Get current phy config */
+ /* Get current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status)
@@ -1841,6 +1959,10 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
+ if (dcbx_cfg->pfc.pfcena)
+ /* PFC enabled so report LFC as off */
+ goto out;
+
if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
pause->tx_pause = 1;
if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
@@ -1861,6 +1983,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
+ struct ice_dcbx_cfg *dcbx_cfg;
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
@@ -1871,6 +1994,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
+ dcbx_cfg = &pi->local_dcbx_cfg;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
@@ -1893,6 +2017,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
+ if (dcbx_cfg->pfc.pfcena) {
+ netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
if (pause->rx_pause && pause->tx_pause)
pi->fc.req_mode = ICE_FC_FULL;
else if (pause->rx_pause && !pause->tx_pause)
@@ -2021,11 +2149,12 @@ out:
* @key: hash key
* @hfunc: hash function
*
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
-static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int
+ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2087,7 +2216,7 @@ enum ice_container_type {
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, RX or TX
+ * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -2100,12 +2229,18 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
- struct ice_pf *pf = rc->ring->vsi->back;
+ struct ice_pf *pf;
+
+ if (!rc->ring)
+ return -EINVAL;
+
+ pf = rc->ring->vsi->back;
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
@@ -2120,49 +2255,60 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
}
/**
+ * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated with the queue for getting ITR/INTRL (coalesce) settings
+ * @ec: ethtool structure to fill with the queue's coalesce settings
+ * @q_num: queue number/index to get the ITR/INTRL (coalesce) settings for
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx))
+ return -EINVAL;
+ if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx))
+ return -EINVAL;
+ } else if (q_num < vsi->num_rxq) {
+ if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx))
+ return -EINVAL;
+ } else if (q_num < vsi->num_txq) {
+ if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* __ice_get_coalesce - get ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
* @q_num: queue number to get the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we return coalesce settings
+ * based on queue number 0, else use the actual q_num passed in.
*/
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- int tx = -EINVAL, rx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
- if (q_num < 0) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[0]->q_vector->rx);
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[0]->q_vector->tx);
-
- goto update_coalesced_frames;
- }
-
- if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[q_num]->q_vector->rx);
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[q_num]->q_vector->tx);
- } else if (q_num < vsi->num_rxq) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[q_num]->q_vector->rx);
- } else if (q_num < vsi->num_txq) {
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[q_num]->q_vector->tx);
- } else {
- /* q_num is invalid for both Rx and Tx queues */
- return -EINVAL;
- }
+ if (q_num < 0)
+ q_num = 0;
-update_coalesced_frames:
- /* either q_num is invalid for both Rx and Tx queues or setting coalesce
- * failed completely
- */
- if (tx && rx)
+ if (ice_get_q_coalesce(vsi, ec, q_num))
return -EINVAL;
if (q_num < vsi->num_txq)
@@ -2180,15 +2326,16 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_get_coalesce(netdev, ec, -1);
}
-static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, q_num);
}
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, RX or TX
+ * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -2213,6 +2360,23 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
switch (c_type) {
case ICE_RX_CONTAINER:
+ if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
+ (ec->rx_coalesce_usecs_high &&
+ ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
+ netdev_info(vsi->netdev,
+ "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n",
+ pf->hw.intrl_gran, ICE_MAX_INTRL);
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
+ rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
+ wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
+ rc->ring->q_vector->v_idx),
+ ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
+ pf->hw.intrl_gran));
+ }
+
if (ec->rx_coalesce_usecs != itr_setting &&
ec->use_adaptive_rx_coalesce) {
netdev_info(vsi->netdev,
@@ -2235,6 +2399,12 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
}
break;
case ICE_TX_CONTAINER:
+ if (ec->tx_coalesce_usecs_high) {
+ netdev_info(vsi->netdev,
+ "setting tx-usecs-high is not supported\n");
+ return -EINVAL;
+ }
+
if (ec->tx_coalesce_usecs != itr_setting &&
ec->use_adaptive_tx_coalesce) {
netdev_info(vsi->netdev,
@@ -2264,54 +2434,77 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
return 0;
}
+/**
+ * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated with the queue that needs updating
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi))
+ return -EINVAL;
+
+ if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi))
+ return -EINVAL;
+ } else if (q_num < vsi->num_rxq) {
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi))
+ return -EINVAL;
+ } else if (q_num < vsi->num_txq) {
+ if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __ice_set_coalesce - set ITR/INTRL values for the device
+ * @netdev: pointer to the netdev associated with this request
+ * @ec: ethtool structure holding the coalesce settings to apply
+ * @q_num: queue number to set the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we set the coalesce settings
+ * for all Tx/Rx queues, else use the actual q_num passed in.
+ */
static int
__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- int rx = -EINVAL, tx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
int i;
ice_for_each_q_vector(vsi, i) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &q_vector->rx, vsi) ||
- ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &q_vector->tx, vsi))
+ if (ice_set_q_coalesce(vsi, ec, i))
return -EINVAL;
}
-
goto set_work_lmt;
}
- if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &vsi->rx_rings[q_num]->q_vector->rx,
- vsi);
- tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &vsi->tx_rings[q_num]->q_vector->tx,
- vsi);
- } else if (q_num < vsi->num_rxq) {
- rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &vsi->rx_rings[q_num]->q_vector->rx,
- vsi);
- } else if (q_num < vsi->num_txq) {
- tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &vsi->tx_rings[q_num]->q_vector->tx,
- vsi);
- }
-
- /* either q_num is invalid for both Rx and Tx queues or setting coalesce
- * failed completely
- */
- if (rx && tx)
+ if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
set_work_lmt:
+
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
ec->rx_max_coalesced_frames_irq);
@@ -2325,8 +2518,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_set_coalesce(netdev, ec, -1);
}
-static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
}
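
For reference, the ICE_PFC_STATS_LEN macro added near the top of this file counts the extra ethtool stat slots by dividing the size of each per-priority counter array by sizeof(u64). A minimal standalone sketch (not driver code) of the same arithmetic, assuming 8-entry u64 arrays like the driver's priority_xon/xoff counters (the real struct ice_hw_port_stats layout is not shown in this patch):

/* Illustrative sketch: a FIELD_SIZEOF-style macro turning per-priority
 * counter arrays into an ethtool stats count. Four 8-entry u64 arrays
 * yield 32 additional stat strings/values.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct port_stats {
	uint64_t priority_xoff_rx[8];
	uint64_t priority_xon_rx[8];
	uint64_t priority_xoff_tx[8];
	uint64_t priority_xon_tx[8];
};

#define PFC_STATS_LEN ( \
	(FIELD_SIZEOF(struct port_stats, priority_xoff_rx) + \
	 FIELD_SIZEOF(struct port_stats, priority_xon_rx)  + \
	 FIELD_SIZEOF(struct port_stats, priority_xoff_tx) + \
	 FIELD_SIZEOF(struct port_stats, priority_xon_tx)) \
	/ sizeof(uint64_t))

int main(void)
{
	/* 4 arrays x 8 priorities = 32 extra stat slots */
	printf("PFC stats slots: %zu\n", PFC_STATS_LEN);
	return 0;
}
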
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6bf5cc064270..ec25f26069b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -49,6 +49,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PRTDCB_GENS 0x00083020
+#define PRTDCB_GENS_DCBX_STATUS_S 0
+#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -106,6 +109,16 @@
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_CTL 0x0016CC54
+#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
+#define GLINT_CTL_ITR_GRAN_200_S 16
+#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16)
+#define GLINT_CTL_ITR_GRAN_100_S 20
+#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20)
+#define GLINT_CTL_ITR_GRAN_50_S 24
+#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
+#define GLINT_CTL_ITR_GRAN_25_S 28
+#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
@@ -150,11 +163,15 @@
#define PFINT_OICR_ENA 0x0016C900
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
#define VPINT_ALLOC_FIRST_S 0
@@ -168,6 +185,8 @@
#define VPINT_ALLOC_PCI_LAST_S 12
#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
+#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -306,11 +325,16 @@
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
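
The new per-priority port counters follow a fixed stride: per the macros above, consecutive priorities are 64 bytes apart and consecutive PFs are 8 bytes apart, and ice_update_dcb_stats() passes the PF index as _i and the user priority as _j. A small standalone sketch (not driver code) that just expands one of these macros to show the resulting register offsets:

/* Illustrative sketch: expand GLPRT_PXOFFRXC for one PF across all eight
 * user priorities, mirroring the loop in ice_update_dcb_stats().
 */
#include <stdio.h>

#define GLPRT_PXOFFRXC(_i, _j)	(0x00380500 + ((_i) * 8 + (_j) * 64))

int main(void)
{
	unsigned int pf_id = 1;
	int prio;

	for (prio = 0; prio < 8; prio++)
		printf("PF %u, priority %d: XOFF Rx counter at 0x%08x\n",
		       pf_id, prio,
		       (unsigned int)GLPRT_PXOFFRXC(pf_id, prio));
	return 0;
}
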
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index ef4c79b5aa32..510a8c900e61 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -20,7 +20,7 @@ union ice_32byte_rx_desc {
} lo_dword;
union {
__le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow Director filter id */
+ __le32 fd_id; /* Flow Director filter ID */
} hi_dword;
} qword0;
struct {
@@ -99,7 +99,7 @@ enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
-/* RX Flex Descriptor
+/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
*/
@@ -113,7 +113,7 @@ union ice_32b_rx_flex_desc {
} read;
struct {
/* Qword 0 */
- u8 rxdid; /* descriptor builder profile id */
+ u8 rxdid; /* descriptor builder profile ID */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
@@ -149,7 +149,7 @@ union ice_32b_rx_flex_desc {
/* Rx Flex Descriptor NIC Profile
* This descriptor corresponds to RxDID 2 which contains
- * metadata fields for RSS, flow id and timestamp info
+ * metadata fields for RSS, flow ID and timestamp info
*/
struct ice_32b_rx_flex_desc_nic {
/* Qword 0 */
@@ -208,23 +208,23 @@ enum ice_flex_rx_mdid {
ICE_RX_MDID_HASH_HIGH,
};
-/* Rx Flag64 packet flag bits */
-enum ice_rx_flg64_bits {
- ICE_RXFLG_PKT_DSI = 0,
- ICE_RXFLG_EVLAN_x8100 = 15,
- ICE_RXFLG_EVLAN_x9100,
- ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_MAC = 22,
- ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_FIN = 32,
- ICE_RXFLG_SYN,
- ICE_RXFLG_RST,
- ICE_RXFLG_TNL0 = 38,
- ICE_RXFLG_TNL1,
- ICE_RXFLG_TNL2,
- ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_RSVD = 63
+/* Rx/Tx Flag64 packet flag bits */
+enum ice_flg64_bits {
+ ICE_FLG_PKT_DSI = 0,
+ ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x9100,
+ ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_MAC = 22,
+ ICE_FLG_TNL_VLAN,
+ ICE_FLG_PKT_FRG,
+ ICE_FLG_FIN = 32,
+ ICE_FLG_SYN,
+ ICE_FLG_RST,
+ ICE_FLG_TNL0 = 38,
+ ICE_FLG_TNL1,
+ ICE_FLG_TNL2,
+ ICE_FLG_UDP_GRE,
+ ICE_FLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
@@ -322,7 +322,7 @@ enum ice_rlan_ctx_rx_hsplit_1 {
ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};
-/* TX Descriptor */
+/* Tx Descriptor */
struct ice_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
@@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
- ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
+ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
+ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
+ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
+ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
};
#define ICE_TXD_QW1_OFFSET_S 16
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fa61203bee26..fbf1eba0cc2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_dcb_lib.h"
/**
* ice_setup_rx_ctx - Configure a receive ring context
@@ -73,7 +74,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
- /* increasing context priority to pick up profile id;
+ /* increasing context priority to pick up profile ID;
* default is 0x01; setting to 0x03 to ensure profile
* is programming if prev context is of same priority
*/
@@ -124,6 +125,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
+ ice_set_cgd_num(tlan_ctx, ring);
+
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -138,7 +141,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF id */
+ /* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
@@ -175,17 +178,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
int i;
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
-
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- break;
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_MAX_RETRY)
- return -ETIMEDOUT;
- return 0;
+ return -ETIMEDOUT;
}
/**
@@ -197,19 +197,13 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int i, j, ret = 0;
+ int i, ret = 0;
for (i = 0; i < vsi->num_rxq; i++) {
int pf_q = vsi->rxq_map[i];
u32 rx_reg;
- for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
- if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
- ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
- break;
- usleep_range(1000, 2000);
- }
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
/* Skip if the queue is already in the requested state */
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
@@ -238,12 +232,11 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
/**
* ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
* @vsi: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
* On success: returns 0
*/
-static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
@@ -258,15 +251,11 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
if (!vsi->rx_rings)
goto err_rxrings;
- if (alloc_qvectors) {
- /* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
- vsi->num_q_vectors,
- sizeof(*vsi->q_vectors),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- goto err_vectors;
- }
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
+ sizeof(*vsi->q_vectors), GFP_KERNEL);
+ if (!vsi->q_vectors)
+ goto err_vectors;
return 0;
@@ -279,25 +268,49 @@ err_txrings:
}
/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
* @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
+{
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ break;
+ default:
+ dev_dbg(&vsi->back->pdev->dev,
+ "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ * @vf_id: ID of the VF being configured
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = NULL;
+
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
- vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
- vsi->alloc_txq = pf->num_vf_qps;
- vsi->alloc_rxq = pf->num_vf_qps;
+ vf = &pf->vf[vsi->vf_id];
+ vsi->alloc_txq = vf->num_vf_qs;
+ vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 from the original vector
@@ -306,10 +319,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->num_q_vectors = pf->num_vf_msix - 1;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
+
+ ice_vsi_set_num_desc(vsi);
}
/**
@@ -370,16 +384,15 @@ void ice_vsi_delete(struct ice_vsi *vsi)
}
/**
- * ice_vsi_free_arrays - clean up VSI resources
+ * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
* @vsi: pointer to VSI being cleared
- * @free_qvectors: bool to specify if q_vectors should be deallocated
*/
-static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
/* free the ring and vector containers */
- if (free_qvectors && vsi->q_vectors) {
+ if (vsi->q_vectors) {
devm_kfree(&pf->pdev->dev, vsi->q_vectors);
vsi->q_vectors = NULL;
}
@@ -427,7 +440,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
- ice_vsi_free_arrays(vsi, true);
+ ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(&pf->pdev->dev, vsi);
@@ -455,10 +468,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
+ * @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+static struct ice_vsi *
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
struct ice_vsi *vsi = NULL;
@@ -484,18 +499,21 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;
- ice_vsi_set_num_qs(vsi);
+ if (type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi, true))
+ if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi, true))
+ if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
default:
@@ -547,7 +565,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
/**
* __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
*/
@@ -579,11 +597,10 @@ err_scatter:
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
- * This is an internal function for assigning queues from the PF to VSI and
- * initially tries to find contiguous space. If it is not successful to find
- * contiguous space, then it tries with the scatter approach.
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
*
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
*/
@@ -827,7 +844,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* find the (rounded up) power-of-2 of qcount */
pow = order_base_2(qcount_rx);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
@@ -852,7 +869,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
- vsi->num_rxq = offset;
+
+ /* if offset is non-zero, it was calculated correctly based on the
+ * enabled TCs for a given VSI; otherwise qcount_rx will always be
+ * correct and non-zero because it is based on the VSI's allocated
+ * Rx queues, which is at least 1 (hence qcount_tx will be at least 1)
+ */
+ if (offset)
+ vsi->num_rxq = offset;
+ else
+ vsi->num_rxq = qcount_rx;
+
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
@@ -881,6 +909,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
switch (vsi->type) {
case ICE_VSI_PF:
@@ -894,8 +925,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
return;
}
@@ -923,6 +953,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
if (!ctxt)
return -ENOMEM;
+ ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -948,6 +979,14 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
+ /* Enable MAC Antispoof with new VSI being initialized or updated */
+ if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -973,10 +1012,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+ dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
v_idx);
return;
}
@@ -991,7 +1031,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
- devm_kfree(&vsi->back->pdev->dev, q_vector);
+ devm_kfree(&pf->pdev->dev, q_vector);
vsi->q_vectors[v_idx] = NULL;
}
@@ -1003,7 +1043,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
ice_free_q_vector(vsi, v_idx);
}
@@ -1143,8 +1183,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors, vsi->idx);
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
@@ -1153,7 +1192,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
if (vsi->type != ICE_VSI_VF) {
- ice_free_res(vsi->back->sw_irq_tracker,
+ ice_free_res(pf->sw_irq_tracker,
vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += num_q_vectors;
}
@@ -1215,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1234,7 +1273,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1253,7 +1292,11 @@ err_out:
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
* and Rx rings to the vector as "efficiently" as possible.
*/
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#endif /* CONFIG_DCB */
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
@@ -1339,7 +1382,6 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
*/
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
- u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
enum ice_status status;
@@ -1361,31 +1403,30 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(&vsi->back->pdev->dev,
+ dev_err(&pf->pdev->dev,
"set_rss_lut failed, error %d\n", status);
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
- key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+ key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
}
if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(key,
+ (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
+ ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
else
- netdev_rss_key_fill((void *)seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- memcpy(&key->standard_rss_key, seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ netdev_rss_key_fill((void *)key,
+ ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+ dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
status);
err = -EIO;
}
@@ -1397,12 +1438,12 @@ ice_vsi_cfg_rss_exit:
}
/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
* @macaddr: the MAC address to be added.
*
- * Adds mac address filter entry to the temp list
+ * Adds MAC address filter entry to the temp list
*
* Returns 0 on success or ENOMEM on failure.
*/
@@ -1504,7 +1545,7 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h)
/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be added
+ * @vid: VLAN ID to be added
*/
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
@@ -1542,7 +1583,7 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
/**
* ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be removed
+ * @vid: VLAN ID to be removed
*
* Returns 0 on success and negative on failure
*/
@@ -1551,7 +1592,8 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
- int status = 0;
+ enum ice_status status;
+ int err = 0;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
if (!list)
@@ -1567,14 +1609,20 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
- if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
- dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
- vid, vsi->vsi_num);
- status = -EIO;
+ status = ice_remove_vlan(&pf->hw, &tmp_add_list);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+ vid, vsi->vsi_num, status);
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Error removing VLAN %d on vsi %i error: %d\n",
+ vid, vsi->vsi_num, status);
+ err = -EIO;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
- return status;
+ return err;
}
/**
@@ -1586,7 +1634,6 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
- int err = 0;
u16 i;
if (vsi->type == ICE_VSI_VF)
@@ -1601,14 +1648,19 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
/* set up individual rings */
- for (i = 0; i < vsi->num_rxq && !err; i++)
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ for (i = 0; i < vsi->num_rxq; i++) {
+ int err;
- if (err) {
- dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
- return -EIO;
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ i, err);
+ return err;
+ }
}
- return err;
+
+ return 0;
}
/**
@@ -1640,7 +1692,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
num_q_grps = 1;
/* set up and configure the Tx queues for each enabled TC */
- for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ ice_for_each_traffic_class(tc) {
if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
break;
@@ -1660,10 +1712,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len,
- NULL);
+ i, num_q_grps, qg_buf,
+ buf_len, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev,
+ dev_err(&pf->pdev->dev,
"Failed to set LAN Tx queue context, error: %d\n",
status);
err = -ENODEV;
@@ -1707,7 +1759,7 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
* This function converts a decimal interrupt rate limit in usecs to the format
* expected by firmware.
*/
-static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
u32 val = intrl / gran;
@@ -1717,17 +1769,49 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
+/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
* @q_vector: interrupt vector that's being configured
- * @vector: HW vector index to apply the interrupt throttling to
*
* Configure interrupt throttling values for the ring containers that are
* associated with the interrupt vector passed in.
*/
static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
+ ice_cfg_itr_gran(hw);
+
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
@@ -1738,8 +1822,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
@@ -1753,8 +1836,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
}
@@ -1766,17 +1848,17 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ u16 reg_idx = q_vector->reg_idx;
- ice_cfg_itr(hw, q_vector, vector);
+ ice_cfg_itr(hw, q_vector);
- wr32(hw, GLINT_RATE(vector),
+ wr32(hw, GLINT_RATE(reg_idx),
ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
/* Both Transmit Queue Interrupt Cause Control register
@@ -1791,33 +1873,37 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
- int itr_idx = q_vector->tx.itr_idx;
+ int itr_idx = (q_vector->tx.itr_idx <<
+ QINT_TQCTL_ITR_INDX_S) &
+ QINT_TQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_TQCTL_ITR_INDX_S) |
- ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ (((i + 1) << QINT_TQCTL_MSIX_INDX_S) &
+ QINT_TQCTL_MSIX_INDX_M);
else
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((reg_idx << QINT_TQCTL_MSIX_INDX_S) &
+ QINT_TQCTL_MSIX_INDX_M);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
- int itr_idx = q_vector->rx.itr_idx;
+ int itr_idx = (q_vector->rx.itr_idx <<
+ QINT_RQCTL_ITR_INDX_S) &
+ QINT_RQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_RQCTL_ITR_INDX_S) |
- ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ (((i + 1) << QINT_RQCTL_MSIX_INDX_S) &
+ QINT_RQCTL_MSIX_INDX_M);
else
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((reg_idx << QINT_RQCTL_MSIX_INDX_S) &
+ QINT_RQCTL_MSIX_INDX_M);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++;
}
@@ -1848,6 +1934,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
*/
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M);
+
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
@@ -1937,7 +2027,7 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
- * @rel_vmvf_num: Relative id of VF/VM
+ * @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
* @offset: offset within vsi->txq_map
*/
@@ -1947,10 +2037,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ int tc, q_idx = 0, err = 0;
+ u16 *q_ids, *q_handles, i;
enum ice_status status;
u32 *q_teids, val;
- u16 *q_ids, i;
- int err = 0;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
@@ -1967,50 +2057,69 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
goto err_alloc_q_ids;
}
- /* set up the Tx queue list to be disabled */
- ice_for_each_txq(vsi, i) {
- u16 v_idx;
+ q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
+ sizeof(*q_handles), GFP_KERNEL);
+ if (!q_handles) {
+ err = -ENOMEM;
+ goto err_alloc_q_handles;
+ }
- if (!rings || !rings[i] || !rings[i]->q_vector) {
- err = -EINVAL;
- goto err_out;
- }
+ /* set up the Tx queue list to be disabled for each enabled TC */
+ ice_for_each_traffic_class(tc) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- q_ids[i] = vsi->txq_map[i + offset];
- q_teids[i] = rings[i]->txq_teid;
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ if (!rings || !rings[q_idx] ||
+ !rings[q_idx]->q_vector) {
+ err = -EINVAL;
+ goto err_out;
+ }
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
+ q_ids[i] = vsi->txq_map[q_idx + offset];
+ q_teids[i] = rings[q_idx]->txq_teid;
+ q_handles[i] = i;
- /* software is expected to wait for 100 ns */
- ndelay(100);
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
- /* trigger a software interrupt for the vector associated to
- * the queue to schedule NAPI handler
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx),
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_MSK_M);
+ q_idx++;
+ }
+ status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
+ vsi->num_txq, q_handles, q_ids,
+ q_teids, rst_src, rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an active
+ * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
+ * an error as the reset operation disables queues at the
+ * hardware level anyway.
*/
- v_idx = rings[i]->q_vector->v_idx;
- wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
- }
- status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- rst_src, rel_vmvf_num, NULL);
- /* if the disable queue command was exercised during an active reset
- * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
- * the reset operation disables queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_info(&pf->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&pf->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ err = -ENODEV;
+ }
}
err_out:
+ devm_kfree(&pf->pdev->dev, q_handles);
+
+err_alloc_q_handles:
devm_kfree(&pf->pdev->dev, q_ids);
err_alloc_q_ids:
@@ -2023,10 +2132,11 @@ err_alloc_q_ids:
* ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
- * @rel_vmvf_num: Relative id of VF/VM
+ * @rel_vmvf_num: Relative ID of VF/VM
*/
-int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
- enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
{
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
0);
@@ -2036,19 +2146,22 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
+ * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
*
* returns 0 if VSI is updated, negative otherwise
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
struct device *dev;
+ struct ice_pf *pf;
int status;
if (!vsi)
return -EINVAL;
- dev = &vsi->back->pdev->dev;
+ pf = vsi->back;
+ dev = &pf->pdev->dev;
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2067,14 +2180,16 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ if (!vlan_promisc)
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- vsi->back->hw.adminq.sq_last_status);
+ pf->hw.adminq.sq_last_status);
goto err_out;
}
@@ -2089,12 +2204,98 @@ err_out:
return -EIO;
}
+static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
+
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+}
+
+/**
+ * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
+ * @vsi: VSI to set the q_vectors register index on
+ */
+static int
+ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (!vsi || !vsi->q_vectors)
+ return -EINVAL;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (!q_vector) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set reg_idx on q_vector %d VSI %d\n",
+ i, vsi->vsi_num);
+ goto clear_reg_idx;
+ }
+
+ q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector;
+ }
+
+ return 0;
+
+clear_reg_idx:
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (q_vector)
+ q_vector->reg_idx = 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
+ * @vsi: the VSI being configured
+ * @add_rule: boolean value to add or remove ethertype filter rule
+ */
+static void
+ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ list->fltr_info.fltr_act = ICE_DROP_PACKET;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (add_rule)
+ status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ else
+ status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+
+ if (status)
+ dev_err(&pf->pdev->dev,
+ "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+ vsi->vsi_num, status);
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+}
+
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
* @type: VSI type
- * @vf_id: defines VF id to which this VSI connects. This field is meant to be
+ * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
*
@@ -2112,7 +2313,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- vsi = ice_vsi_alloc(pf, type);
+ if (type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, type, vf_id);
+ else
+ vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
@@ -2120,6 +2325,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_PF)
+ vsi->ethtype = ETH_P_PAUSE;
+
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
@@ -2132,7 +2340,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
- /* set tc configuration */
+ /* set TC configuration */
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
@@ -2150,6 +2358,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_alloc_q_vector;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vector_base;
@@ -2188,6 +2400,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
} else {
vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
}
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
pf->q_left_tx -= vsi->alloc_txq;
pf->q_left_rx -= vsi->alloc_rxq;
break;
@@ -2203,18 +2419,29 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto unroll_vector_base;
}
+ /* Add switch rule to drop all Tx Flow Control Frames, of lookup
+ * type ETHERTYPE from VSIs, and restrict malicious VF from sending
+ * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
+ * The rule is added once for PF VSI in order to create appropriate
+ * recipe, since VSI/VSI list is ignored with drop action...
+ */
+ if (vsi->type == ICE_VSI_PF)
+ ice_vsi_add_rem_eth_mac(vsi, true);
+
return vsi;
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
/* reclaim HW interrupt back to the common pool */
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
@@ -2281,7 +2508,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2500,12 +2727,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
/* disable each interrupt */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- for (i = vsi->hw_base_vector;
- i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
- wr32(hw, GLINT_DYN_CTL(i), 0);
+ ice_for_each_q_vector(vsi, i)
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
ice_flush(hw);
- for (i = 0; i < vsi->num_q_vectors; i++)
+
+ ice_for_each_q_vector(vsi, i)
synchronize_irq(pf->msix_entries[i + base].vector);
}
}
@@ -2551,22 +2778,23 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* reclaim interrupt vectors back to PF */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
- vsi->idx);
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
/* reclaim HW interrupts back to the common pool */
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
- vsi->idx);
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
} else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
/* Reclaim VF resources back only while freeing all VFs or
* vector reassignment is requested
*/
- ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
vsi->idx);
pf->num_avail_hw_msix += pf->num_vf_msix;
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_vsi_add_rem_eth_mac(vsi, false);
+
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
@@ -2596,6 +2824,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vf *vf = NULL;
struct ice_pf *pf;
int ret, i;
@@ -2603,16 +2832,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
+ if (vsi->type == ICE_VSI_VF)
+ vf = &pf->vf[vsi->vf_id];
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- vsi->sw_base_vector = 0;
+
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->sw_base_vector = 0;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else {
+ /* Reclaim VF resources back to the common pool for reset and
+ * rebuild, with vector reassignment
+ */
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
vsi->hw_base_vector = 0;
+
ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi, false);
- ice_dev_onetime_setup(&vsi->back->hw);
- ice_vsi_set_num_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+ ice_dev_onetime_setup(&pf->hw);
+ if (vsi->type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf->vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2620,7 +2871,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
- ret = ice_vsi_alloc_arrays(vsi, false);
+ ret = ice_vsi_alloc_arrays(vsi);
if (ret < 0)
goto err_vsi;
@@ -2634,6 +2885,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
@@ -2643,7 +2898,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
@@ -2655,12 +2910,16 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
- vsi->back->q_left_tx -= vsi->alloc_txq;
- vsi->back->q_left_rx -= vsi->alloc_rxq;
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
break;
@@ -2673,8 +2932,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Failed VSI lan queue config\n");
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto err_vectors;
}
return 0;
@@ -2690,7 +2950,7 @@ err_rings:
}
err_vsi:
ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, vsi->back->state);
+ set_bit(__ICE_RESET_FAILED, pf->state);
return ret;
}
@@ -2705,3 +2965,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
}
+
+#ifdef CONFIG_DCB
+/**
+ * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
+ * @vsi: VSI being configured
+ * @ctx: the context buffer returned from AQ VSI update command
+ */
+static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
+{
+ vsi->info.mapping_flags = ctx->info.mapping_flags;
+ memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
+ * @vsi: VSI to be configured
+ * @ena_tc: TC bitmap
+ *
+ * VSI queues are expected to be quiesced before calling this function
+ */
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_ctx *ctx;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ int i, ret = 0;
+ u8 num_tc = 0;
+
+ ice_for_each_traffic_class(i) {
+ /* build bitmap of enabled TCs */
+ if (ena_tc & BIT(i))
+ num_tc++;
+ /* populate max_txqs per TC */
+ max_txqs[i] = pf->num_lan_tx;
+ }
+
+ vsi->tc_cfg.ena_tc = ena_tc;
+ vsi->tc_cfg.numtc = num_tc;
+
+ ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->vf_num = 0;
+ ctx->info = vsi->info;
+
+ ice_vsi_setup_q_map(vsi, ctx);
+
+ /* must indicate which sections of the VSI context are being modified */
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_info(&pf->pdev->dev, "Failed VSI Update\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, status);
+ ret = -EIO;
+ goto out;
+ }
+ ice_vsi_update_q_map(vsi, ctx);
+ vsi->info.valid_sections = 0;
+
+ ice_vsi_cfg_netdev_tc(vsi, ena_tc);
+out:
+ devm_kfree(&pf->pdev->dev, ctx);
+ return ret;
+}
+#endif /* CONFIG_DCB */
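
For reference, a minimal standalone sketch of the TC-bitmap bookkeeping performed by ice_vsi_cfg_tc() above: counting enabled traffic classes from the ena_tc bitmap and populating a per-TC maximum queue array. The ICE_MAX_TC value, the example bitmap, and num_lan_tx are illustrative assumptions, not values taken from this patch.

/* Standalone illustration of the ena_tc bitmap handling used by
 * ice_vsi_cfg_tc(); ICE_MAX_TC and num_lan_tx hold assumed values.
 */
#include <stdint.h>
#include <stdio.h>

#define ICE_MAX_TC	8	/* assumption: up to 8 traffic classes */

int main(void)
{
	uint16_t max_txqs[ICE_MAX_TC] = { 0 };
	uint16_t num_lan_tx = 64;	/* assumed per-PF Tx queue budget */
	uint8_t ena_tc = 0x0B;		/* example: TCs 0, 1 and 3 enabled */
	uint8_t num_tc = 0;
	int i;

	for (i = 0; i < ICE_MAX_TC; i++) {
		/* build the count of enabled TCs from the bitmap */
		if (ena_tc & (1 << i))
			num_tc++;
		/* every TC entry gets the same per-TC queue budget */
		max_txqs[i] = num_lan_tx;
	}

	printf("enabled TCs: %u\n", (unsigned int)num_tc);
	for (i = 0; i < ICE_MAX_TC; i++)
		printf("TC%d: enabled=%d max_txqs=%u\n", i,
		       !!(ena_tc & (1 << i)), (unsigned int)max_txqs[i]);
	return 0;
}
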
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 7988a53729a9..a91d3553cc89 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -35,12 +35,16 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+#endif /* CONFIG_DCB */
+
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
void ice_vsi_put_qs(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+#endif /* CONFIG_DCB */
+
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -70,8 +78,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
-int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
#endif /* !_ICE_LIB_H_ */
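
To illustrate the priority-to-TC translation that ice_vsi_cfg_netdev_tc() applies in the ice_lib.c hunk above, here is a small standalone sketch: each of the eight 802.1p user priorities is looked up in the DCB ETS priority table and then mapped to a netdev TC index (the value the driver would pass to netdev_set_prio_tc_map()). The table contents and netdev TC assignments are made-up example values, not taken from this patch.

/* Standalone sketch of the UP -> ETS TC -> netdev TC translation done in
 * ice_vsi_cfg_netdev_tc(); prio_table[] and netdev_tc[] hold assumed values.
 */
#include <stdint.h>
#include <stdio.h>

#define ICE_MAX_USER_PRIORITY	8	/* 802.1p user priorities 0..7 */

int main(void)
{
	/* assumed ETS config: priorities 0-3 -> TC0, 4-5 -> TC1, 6-7 -> TC2 */
	uint8_t prio_table[ICE_MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 2, 2 };
	/* assumed per-TC netdev TC indices as set up by the driver */
	uint8_t netdev_tc[ICE_MAX_USER_PRIORITY] = { 0, 1, 2, 0, 0, 0, 0, 0 };
	int up;

	for (up = 0; up < ICE_MAX_USER_PRIORITY; up++) {
		uint8_t ets_tc = prio_table[up];	/* UP -> hardware TC */

		/* hardware TC -> netdev TC mapping for the stack */
		printf("UP %d -> TC %u -> netdev TC %u\n", up,
		       (unsigned int)ets_tc, (unsigned int)netdev_tc[ets_tc]);
	}
	return 0;
}
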
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 47cc3f905b7f..7843abf4d44d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7,8 +7,9 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.2-k"
+#define DRV_VERSION "0.7.4-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -30,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;
-static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
@@ -113,14 +113,14 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
}
/**
- * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
+ * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
- * @addr: mac address to sync
+ * @addr: MAC address to sync
*
* This is a callback function which is called by the in kernel device sync
* functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
* populates the tmp_sync_list, which is later used by ice_add_mac to add the
- * mac filters from the hardware.
+ * MAC filters to the hardware.
*/
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
@@ -134,14 +134,14 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
}
/**
- * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
+ * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
* @netdev: the net device on which the unsync is happening
- * @addr: mac address to unsync
+ * @addr: MAC address to unsync
*
* This is a callback function which is called by the in kernel device unsync
* functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
* populates the tmp_unsync_list, which is later used by ice_remove_mac to
- * delete the mac filters from the hardware.
+ * delete the MAC filters from the hardware.
*/
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
@@ -168,6 +168,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @set_promisc: enable or disable promisc flag request
+ *
+ */
+static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ enum ice_status status = 0;
+
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->vlan_ena) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ set_promisc);
+ } else {
+ if (set_promisc)
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
struct ice_hw *hw = &pf->hw;
enum ice_status status = 0;
u32 changed_flags = 0;
+ u8 promisc_m;
int err = 0;
if (!vsi->netdev)
@@ -211,7 +245,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netif_addr_unlock_bh(netdev);
}
- /* Remove mac addresses in the unsync list */
+ /* Remove MAC addresses in the unsync list */
status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
if (status) {
@@ -223,12 +257,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
- /* Add mac addresses in the sync list */
+ /* Add MAC addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
- if (status) {
+ /* If the filter is added successfully or already exists, do not
+ * treat it as an error. Instead continue processing the rest of
+ * the function.
+ */
+ if (status && status != ICE_ERR_ALREADY_EXISTS) {
netdev_err(netdev, "Failed to add MAC filters\n");
- /* If there is no more space for new umac filters, vsi
+ /* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
@@ -245,49 +283,56 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
/* check for changes in promiscuous modes */
- if (changed_flags & IFF_ALLMULTI)
- netdev_warn(netdev, "Unsupported configuration\n");
+ if (changed_flags & IFF_ALLMULTI) {
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, true);
+ if (err) {
+ netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags &= ~IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, false);
+ if (err) {
+ netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ }
+ }
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
- /* Apply TX filter rule to get traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
- ICE_FLTR_TX);
- if (status) {
- netdev_err(netdev, "Error setting default VSI %i tx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags &= ~IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
- }
- /* Apply RX filter rule to get traffic from wire */
+ /* Apply Rx filter rule to get traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
- netdev_err(netdev, "Error setting default VSI %i rx rule\n",
+ netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
} else {
- /* Clear TX filter rule to stop traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
- ICE_FLTR_TX);
- if (status) {
- netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags |= IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
- }
- /* Clear RX filter to remove traffic from wire */
+ /* Clear Rx filter to remove traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
+ netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_PROMISC;
err = -EIO;
@@ -322,7 +367,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
- for (v = 0; v < pf->num_alloc_vsi; v++)
+ ice_for_each_vsi(pf, v)
if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
ice_vsi_sync_fltr(pf->vsi[v])) {
/* come back and try again later */
@@ -332,6 +377,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
+}
+
+/**
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ * @locked: is the rtnl_lock already held
+ */
+#ifdef CONFIG_DCB
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_dis_vsi(pf->vsi[v], locked);
+}
+
+/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -342,12 +432,16 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ /* already prepared for reset */
+ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ return;
+
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
- ice_pf_dis_all_vsi(pf);
+ ice_pf_dis_all_vsi(pf, false);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -394,6 +488,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
}
@@ -416,10 +511,15 @@ static void ice_reset_subtask(struct ice_pf *pf)
* for the reset now), poll for reset done, rebuild and return.
*/
if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
- clear_bit(__ICE_GLOBR_RECV, pf->state);
- clear_bit(__ICE_CORER_RECV, pf->state);
- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
- ice_prepare_for_reset(pf);
+ /* Perform the largest reset requested */
+ if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+ reset_type = ICE_RESET_CORER;
+ if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ /* return if no valid reset type requested */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -429,13 +529,14 @@ static void ice_reset_subtask(struct ice_pf *pf)
pf->hw.reset_ongoing = false;
ice_rebuild(pf);
/* clear bit to resume normal operations, but
- * ICE_NEEDS_RESTART bit is set incase rebuild failed
+ * ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
clear_bit(__ICE_RESET_OICR_RECV, pf->state);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
clear_bit(__ICE_CORER_REQ, pf->state);
clear_bit(__ICE_GLOBR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
return;
@@ -469,6 +570,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
const char *speed;
const char *fc;
+ if (!vsi)
+ return;
+
if (vsi->current_isup == isup)
return;
@@ -519,6 +623,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
case ICE_FC_RX_PAUSE:
fc = "RX";
break;
+ case ICE_FC_NONE:
+ fc = "None";
+ break;
default:
fc = "Unknown";
break;
@@ -529,21 +636,22 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
- * ice_vsi_link_event - update the vsi's netdev
- * @vsi: the vsi on which the link event occurred
- * @link_up: whether or not the vsi needs to be set up or down
+ * ice_vsi_link_event - update the VSI's netdev
+ * @vsi: the VSI on which the link event occurred
+ * @link_up: whether or not the VSI needs to be set up or down
*/
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
- if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ if (!vsi)
+ return;
+
+ if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
return;
if (vsi->type == ICE_VSI_PF) {
- if (!vsi->netdev) {
- dev_dbg(&vsi->back->pdev->dev,
- "vsi->netdev is not initialized!\n");
+ if (link_up == netif_carrier_ok(vsi->netdev))
return;
- }
+
if (link_up) {
netif_carrier_on(vsi->netdev);
netif_tx_wake_all_queues(vsi->netdev);
@@ -558,61 +666,51 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
* ice_link_event - process the link event
* @pf: pf that the link event is associated with
* @pi: port_info for the port that the link event is associated with
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
*
- * Returns -EIO if ice_get_link_status() fails
- * Returns 0 on success
+ * Returns 0 on success and negative on failure
*/
static int
-ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
+ u16 link_speed)
{
- u8 new_link_speed, old_link_speed;
struct ice_phy_info *phy_info;
- bool new_link_same_as_old;
- bool new_link, old_link;
- u8 lport;
- u16 v;
+ struct ice_vsi *vsi;
+ u16 old_link_speed;
+ bool old_link;
+ int result;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
- /* Force ice_get_link_status() to update link info */
- phy_info->get_link_info = true;
- old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+ old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
old_link_speed = phy_info->link_info_old.link_speed;
- lport = pi->lport;
- if (ice_get_link_status(pi, &new_link)) {
+ /* update the link info structures and re-enable link events,
+ * don't bail on failure as other bookkeeping is still needed
+ */
+ result = ice_update_link_info(pi);
+ if (result)
dev_dbg(&pf->pdev->dev,
- "Could not get link status for port %d\n", lport);
- return -EIO;
- }
-
- new_link_speed = phy_info->link_info.link_speed;
+ "Failed to update link status and re-enable link events for port %d\n",
+ pi->lport);
- new_link_same_as_old = (new_link == old_link &&
- new_link_speed == old_link_speed);
-
- ice_for_each_vsi(pf, v) {
- struct ice_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->port_info)
- continue;
+ /* if the old link up/down and speed is the same as the new */
+ if (link_up == old_link && link_speed == old_link_speed)
+ return result;
- if (new_link_same_as_old &&
- (test_bit(__ICE_DOWN, vsi->state) ||
- new_link == netif_carrier_ok(vsi->netdev)))
- continue;
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi || !vsi->port_info)
+ return -EINVAL;
- if (vsi->port_info->lport == lport) {
- ice_print_link_msg(vsi, new_link);
- ice_vsi_link_event(vsi, new_link);
- }
- }
+ ice_vsi_link_event(vsi, link_up);
+ ice_print_link_msg(vsi, link_up);
- if (!new_link_same_as_old && pf->num_alloc_vfs)
+ if (pf->num_alloc_vfs)
ice_vc_notify_link_state(pf);
- return 0;
+ return result;
}
/**
@@ -635,19 +733,73 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
pf->serv_tmr_prev = jiffies;
- if (ice_link_event(pf, pf->hw.port_info))
- dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
-
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
+ * ice_init_link_events - enable/initialize link events
+ * @pi: pointer to the port_info instance
+ *
+ * Returns -EIO on failure, 0 on success
+ */
+static int ice_init_link_events(struct ice_port_info *pi)
+{
+ u16 mask;
+
+ mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+
+ if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to set link event mask for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to enable link events for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: pf that the link event is associated with
+ * @event: event structure containing link status info
+ */
+static int
+ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_aqc_get_link_status_data *link_data;
+ struct ice_port_info *port_info;
+ int status;
+
+ link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return -EINVAL;
+
+ status = ice_link_event(pf, port_info,
+ !!(link_data->link_info & ICE_AQ_LINK_UP),
+ le16_to_cpu(link_data->link_speed));
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Could not process link event, error %d\n", status);
+
+ return status;
+}
+
+/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
* @q_type: specific Control queue type
@@ -750,12 +902,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ if (ice_handle_link_event(pf, &event))
+ dev_err(&pf->pdev->dev,
+ "Could not handle link event\n");
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
+ case ice_aqc_opc_lldp_set_mib_change:
+ ice_dcb_process_lldp_set_mib_change(pf, &event);
+ break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
@@ -877,6 +1037,18 @@ static void ice_service_task_stop(struct ice_pf *pf)
}
/**
+ * ice_service_task_restart - restart service task and schedule works
+ * @pf: board private structure
+ *
+ * This function is needed for suspend and resume operations (e.g. WoL scenario)
+ */
+static void ice_service_task_restart(struct ice_pf *pf)
+{
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -901,7 +1073,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u32 reg;
int i;
- if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
return;
/* find what triggered the MDD event */
@@ -993,10 +1165,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
struct ice_vf *vf = &pf->vf[i];
+ mdd_detected = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1004,7 +1178,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1012,7 +1186,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1020,26 +1194,19 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
}
- if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
- dev_info(&pf->pdev->dev,
- "Too many MDD events on VF %d, disabled\n", i);
+ if (mdd_detected) {
+ vf->num_mdd_events++;
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
}
}
- /* re-enable MDD interrupt cause */
- clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
- reg = rd32(hw, PFINT_OICR_ENA);
- reg |= PFINT_OICR_MAL_DETECT_M;
- wr32(hw, PFINT_OICR_ENA, reg);
- ice_flush(hw);
}
/**
@@ -1089,7 +1256,7 @@ static void ice_service_task(struct work_struct *work)
/**
* ice_set_ctrlq_len - helper function to set controlq length
- * @hw: pointer to the hw instance
+ * @hw: pointer to the HW instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
@@ -1111,8 +1278,9 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
*/
-static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
+static void
+ice_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
{
struct ice_q_vector *q_vector =
container_of(notify, struct ice_q_vector, affinity_notify);
@@ -1142,7 +1310,7 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
}
@@ -1184,10 +1352,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[base + vector].vector,
- vsi->irq_handler, 0, q_vector->name,
- q_vector);
+ err = devm_request_irq(&pf->pdev->dev, irq_num,
+ vsi->irq_handler, 0,
+ q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
"MSIX request_irq failed, error: %d\n", err);
@@ -1328,7 +1495,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
rd32(hw, PFHMC_ERRORDATA));
}
- /* Report and mask off any remaining unexpected interrupts */
+ /* Report any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
@@ -1342,12 +1509,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
}
- ena_mask &= ~oicr;
}
ret = IRQ_HANDLED;
- /* re-enable interrupt causes that are not handled during this pass */
- wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1406,23 +1570,23 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
/**
* ice_ena_ctrlq_interrupts - enable control queue interrupts
* @hw: pointer to HW structure
- * @v_idx: HW vector index to associate the control queue interrupts with
+ * @reg_idx: HW vector index to associate the control queue interrupts with
*/
-static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx)
+static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
u32 val;
- val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* enable Admin queue Interrupt causes */
- val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
/* enable Mailbox queue Interrupt causes */
- val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
@@ -1510,7 +1674,7 @@ void ice_napi_del(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
@@ -1529,7 +1693,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll, NAPI_POLL_WEIGHT);
}
@@ -1649,18 +1813,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
- * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
- * @vid: vlan id to be added
+ * @vid: VLAN ID to be added
*
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for adding VLAN IDs
*/
-static int ice_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1673,33 +1839,39 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- int ret = ice_cfg_vlan_pruning(vsi, true);
-
+ ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
+ /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- return ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid);
+ if (!ret) {
+ vsi->vlan_ena = true;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ }
+
+ return ret;
}
/**
- * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
- * @vid: vlan id to be removed
+ * @vid: VLAN ID to be removed
*
- * net_device_ops implementation for removing vlan ids
+ * net_device_ops implementation for removing VLAN IDs
*/
-static int ice_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int status;
+ int ret;
if (vsi->info.pvid)
return -EINVAL;
@@ -1707,15 +1879,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
- status = ice_vsi_kill_vlan(vsi, vid);
- if (status)
- return status;
+ ret = ice_vsi_kill_vlan(vsi, vid);
+ if (ret)
+ return ret;
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
- status = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_cfg_vlan_pruning(vsi, false, false);
- return status;
+ vsi->vlan_ena = false;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ return ret;
}
/**
@@ -2033,23 +2207,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
- * ice_verify_itr_gran - verify driver's assumption of ITR granularity
- * @pf: pointer to the PF structure
- *
- * There is no error returned here because the driver will be able to handle a
- * different ITR granularity, but interrupt moderation will not be accurate if
- * the driver's assumptions are not verified. This assumption is made so we can
- * use constants in the hot path instead of accessing structure members.
- */
-static void ice_verify_itr_gran(struct ice_pf *pf)
-{
- if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
- dev_warn(&pf->pdev->dev,
- "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
- (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
-}
-
-/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2072,9 +2229,10 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
*
* Returns 0 on success, negative on failure
*/
-static int ice_probe(struct pci_dev *pdev,
- const struct pci_device_id __always_unused *ent)
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
+ struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -2086,20 +2244,20 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
- dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
return err;
}
- pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
+ pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
@@ -2133,17 +2291,26 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_hw(hw);
if (err) {
- dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
- dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
+ dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
ice_init_pf(pf);
+ err = ice_init_pf_dcb(pf);
+ if (err) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+
+ /* do not fail overall init if DCB init fails */
+ err = 0;
+ }
+
ice_determine_q_usage(pf);
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
@@ -2152,8 +2319,8 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_pf_unroll;
}
- pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi), GFP_KERNEL);
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
@@ -2161,8 +2328,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev,
- "ice_init_interrupt_scheme failed: %d\n", err);
+ dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
@@ -2178,15 +2344,13 @@ static int ice_probe(struct pci_dev *pdev,
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
- dev_err(&pdev->dev,
- "setup of misc vector failed: %d\n", err);
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
- GFP_KERNEL);
+ pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
@@ -2204,8 +2368,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(&pdev->dev,
- "probe failed due to setup pf switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2214,8 +2377,13 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ err = ice_init_link_events(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_link_events failed: %d\n", err);
+ goto err_alloc_sw_unroll;
+ }
+
ice_verify_cacheline_size(pf);
- ice_verify_itr_gran(pf);
return 0;
@@ -2227,7 +2395,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
- devm_kfree(&pdev->dev, pf->vsi);
+ devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
@@ -2272,6 +2440,136 @@ static void ice_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
}
+/**
+ * ice_pci_err_detected - warning that PCI error has been detected
+ * @pdev: PCI device information struct
+ * @err: the type of PCI error
+ *
+ * Called to warn that something happened on the PCI bus and the error handling
+ * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
+ */
+static pci_ers_result_t
+ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
+ __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ice_pci_err_slot_reset - a PCI slot reset has just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to determine if the driver can recover from the PCI slot reset by
+ * using a register read to determine if the device is recoverable.
+ */
+static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset, error %d\n",
+ err);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ /* Check for life */
+ reg = rd32(&pf->hw, GLGEN_RTRIG);
+ if (!reg)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err)
+ dev_dbg(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ err);
+ /* non-fatal, continue */
+
+ return result;
+}
+
+/**
+ * ice_pci_err_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error and/or
+ * reset recovery have finished
+ */
+static void ice_pci_err_resume(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev,
+ "%s failed, device is unrecoverable\n", __func__);
+ return;
+ }
+
+ if (test_bit(__ICE_SUSPENDED, pf->state)) {
+ dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
+ __func__);
+ return;
+ }
+
+ ice_do_reset(pf, ICE_RESET_PFR);
+ ice_service_task_restart(pf);
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+}
+
+/**
+ * ice_pci_err_reset_prepare - prepare device driver for PCI reset
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+}
+
+/**
+ * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_done(struct pci_dev *pdev)
+{
+ ice_pci_err_resume(pdev);
+}
+
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
@@ -2289,12 +2587,21 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static const struct pci_error_handlers ice_pci_err_handler = {
+ .error_detected = ice_pci_err_detected,
+ .slot_reset = ice_pci_err_slot_reset,
+ .reset_prepare = ice_pci_err_reset_prepare,
+ .reset_done = ice_pci_err_reset_done,
+ .resume = ice_pci_err_resume
+};
+
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
.sriov_configure = ice_sriov_configure,
+ .err_handler = &ice_pci_err_handler
};
/**
@@ -2341,7 +2648,7 @@ static void __exit ice_module_exit(void)
module_exit(ice_module_exit);
/**
- * ice_set_mac_address - NDO callback to set mac address
+ * ice_set_mac_address - NDO callback to set MAC address
* @netdev: network interface device structure
* @pi: pointer to an address structure
*
@@ -2378,14 +2685,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the mac address we also have to change the mac address
- * based filter rules that were created previously for the old mac
+ /* When we change the MAC address we also have to change the MAC address
+ * based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
* and then create a new filter rule using ice_add_mac. Note that for
- * both these operations, we first need to form a "list" of mac
- * addresses (even though in this case, we have only 1 mac address to be
+ * both these operations, we first need to form a "list" of MAC
+ * addresses (even though in this case, we have only 1 MAC address to be
* added/removed) and this done using ice_add_mac_to_list. Depending on
- * the ensuing operation this "list" of mac addresses is either to be
+ * the ensuing operation this "list" of MAC addresses is either to be
* added or removed from the filter.
*/
err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
@@ -2423,12 +2730,12 @@ free_lists:
return err;
}
- /* change the netdev's mac address */
+ /* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
netdev->dev_addr);
- /* write new mac address to the firmware */
+ /* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
@@ -2470,7 +2777,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
- * @vid: VLAN id
+ * @vid: VLAN ID
* @flags: instructions from stack about fdb operation
* @extack: netlink extended ack
*/
@@ -2510,11 +2817,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
- * @vid: VLAN id
+ * @vid: VLAN ID
*/
-static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+static int
+ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ __always_unused u16 vid)
{
int err;
@@ -2538,13 +2846,16 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*/
-static int ice_set_features(struct net_device *netdev,
- netdev_features_t features)
+static int
+ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ /* Multiple features can be changed in one call so keep features in
+ * separate if/else statements to guarantee each feature is checked
+ */
if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
ret = ice_vsi_manage_rss_lut(vsi, true);
else if (!(features & NETIF_F_RXHASH) &&
@@ -2557,8 +2868,9 @@ static int ice_set_features(struct net_device *netdev,
else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, false);
- else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
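To see why the VLAN Tx check above becomes an independent if: when a single ndo_set_features call both disables Rx VLAN stripping and enables Tx VLAN insertion, the old else-if chain stopped after the Rx-stripping branch and never evaluated the Tx-insertion one; with the chain split, both features are handled in the same call, as the new comment in ice_set_features() spells out.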
@@ -2568,8 +2880,8 @@ static int ice_set_features(struct net_device *netdev,
}
/**
- * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
- * @vsi: VSI to setup vlan properties for
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
@@ -2601,6 +2913,7 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
+ ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
if (!err)
@@ -2620,7 +2933,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -2666,7 +2979,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_service_task_schedule(pf);
- return err;
+ return 0;
}
/**
@@ -2693,8 +3006,8 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
 * that need to be performed to read u64 values on a 32 bit machine.
*/
-static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
- u64 *bytes)
+static void
+ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
unsigned int start;
*pkts = 0;
@@ -2911,6 +3224,8 @@ static void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
+ ice_update_dcb_stats(pf);
+
ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
@@ -2992,7 +3307,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -3276,7 +3591,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi)
return;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3289,47 +3604,31 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
/**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
* @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
+ int err = 0;
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return err;
+
+ clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ struct net_device *netd = vsi->netdev;
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- if (!locked) {
+ if (locked) {
+ err = netd->netdev_ops->ndo_open(netd);
+ } else {
rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
- } else {
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
- ice_vsi_close(vsi);
- }
- }
-}
-
-/**
- * ice_ena_vsi - resume a VSI
- * @vsi: the VSI being resume
- */
-static int ice_ena_vsi(struct ice_vsi *vsi)
-{
- int err = 0;
-
- if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
- vsi->netdev) {
- if (netif_running(vsi->netdev)) {
- rtnl_lock();
- err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
- rtnl_unlock();
- } else {
err = ice_vsi_open(vsi);
}
}
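The new locked argument lets a caller that already holds rtnl_lock invoke ndo_open directly rather than taking the lock a second time; callers that do not hold it, such as ice_rebuild() below via ice_pf_ena_all_vsi(pf, false), keep the rtnl_lock/rtnl_unlock pair around the call. The CONFIG_DCB export of ice_pf_ena_all_vsi() suggests the DCB code added in this series is the locked caller.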
@@ -3338,29 +3637,21 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
}
/**
- * ice_pf_dis_all_vsi - Pause all VSIs on a PF
- * @pf: the PF
- */
-static void ice_pf_dis_all_vsi(struct ice_pf *pf)
-{
- int v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v], false);
-}
-
-/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
+ * @locked: is the rtnl_lock already held
*/
-static int ice_pf_ena_all_vsi(struct ice_pf *pf)
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- if (ice_ena_vsi(pf->vsi[v]))
+ if (ice_ena_vsi(pf->vsi[v], locked))
return -EIO;
return 0;
@@ -3375,16 +3666,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and reinit the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
int err;
if (!pf->vsi[i])
continue;
- /* VF VSI rebuild isn't supported yet */
- if (pf->vsi[i]->type == ICE_VSI_VF)
- continue;
-
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3412,7 +3699,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and replay the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3479,6 +3766,8 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
+ ice_dcb_rebuild(pf);
+
/* reset search_hint of irq_trackers to 0 since interrupts are
* reclaimed and could be allocated from beginning during VSI rebuild
*/
@@ -3512,7 +3801,7 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* restart the VSIs that were rebuilt and running before the reset */
- err = ice_pf_ena_all_vsi(pf);
+ err = ice_pf_ena_all_vsi(pf, false);
if (err) {
dev_err(&pf->pdev->dev, "error enabling VSIs\n");
/* no need to disable VSIs in tear down path in ice_rebuild()
@@ -3521,9 +3810,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
- ice_reset_all_vfs(pf, true);
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
@@ -3710,7 +3997,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
- * @pid: process id
+ * @pid: process ID
* @seq: RTNL message seq
* @dev: the netdev being configured
* @filter_mask: filter mask passed in
@@ -3909,8 +4196,7 @@ static void ice_tx_timeout(struct net_device *netdev)
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
val = rd32(hw,
- GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->hw_base_vector));
+ GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 413fdbbcc4d0..62571d33d0d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -5,7 +5,7 @@
/**
* ice_aq_read_nvm
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @module_typeid: module pointer location in words from the NVM beginning
* @offset: byte offset from the module beginning
* @length: length of the section to be read (in bytes from the offset)
@@ -235,7 +235,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
/**
* ice_init_nvm - initializes NVM setting
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
@@ -248,7 +248,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
u32 fla, gens_stat;
u8 sr_size;
- /* The SR size is stored regardless of the nvm programming mode
+ /* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 56049739a250..8d49f83be7a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -43,9 +43,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
/**
* ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
* @start_node: pointer to the starting ice_sched_node struct in a sub-tree
- * @teid: node teid to search
+ * @teid: node TEID to search
*
- * This function searches for a node matching the teid in the scheduling tree
+ * This function searches for a node matching the TEID in the scheduling tree
* from the SW DB. The search is recursive and is restricted by the number of
* layers it has searched through; stopping at the max supported layer.
*
@@ -66,7 +66,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
return NULL;
- /* Check if teid matches to any of the children nodes */
+ /* Check if TEID matches to any of the children nodes */
for (i = 0; i < start_node->num_children; i++)
if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
return start_node->children[i];
@@ -86,7 +86,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
* @elems_req: number of elements to request
* @buf: pointer to buffer
@@ -118,7 +118,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
/**
* ice_aq_query_sched_elems - query scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to query
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-static enum ice_status
+enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -138,31 +138,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
- * ice_sched_query_elem - query element information from hw
- * @hw: pointer to the hw struct
- * @node_teid: node teid to be queried
- * @buf: buffer to element information
- *
- * This function queries HW element information
- */
-static enum ice_status
-ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf)
-{
- u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
-
- buf_size = sizeof(*buf);
- memset(buf, 0, buf_size);
- buf->generic[0].node_teid = cpu_to_le32(node_teid);
- status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
- NULL);
- if (status || num_elem_ret != 1)
- ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
- return status;
-}
-
-/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
* @layer: Scheduler layer of the node
@@ -226,7 +201,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
/**
* ice_aq_delete_sched_elems - delete scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @grps_req: number of groups to delete
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -246,13 +221,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
- * ice_sched_remove_elems - remove nodes from hw
- * @hw: pointer to the hw struct
+ * ice_sched_remove_elems - remove nodes from HW
+ * @hw: pointer to the HW struct
* @parent: pointer to the parent node
* @num_nodes: number of nodes
* @node_teids: array of node teids to be deleted
*
- * This function remove nodes from hw
+ * This function removes nodes from HW
*/
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
@@ -276,7 +251,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
- ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
@@ -284,7 +260,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/**
* ice_sched_get_first_node - get the first node of the given layer
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @parent: pointer the base node of the subtree
* @layer: layer number
*
@@ -360,12 +336,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
- status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
- if (status)
- ice_debug(hw, ICE_DBG_SCHED,
- "remove element failed %d\n", status);
+ ice_sched_remove_elems(hw, node->parent, 1, &teid);
}
parent = node->parent;
/* root has no parent */
@@ -409,7 +381,7 @@ err_exit:
/**
* ice_aq_get_dflt_topo - gets default scheduler topology
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @lport: logical port number
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -439,7 +411,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
/**
* ice_aq_add_sched_elems - adds scheduling element
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @grps_req: the number of groups that are requested to be added
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -460,7 +432,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -481,7 +453,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_resume_sched_elems - resume scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to resume
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -502,7 +474,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_query_sched_res - query scheduler resource
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf_size: buffer size in bytes
* @buf: pointer to buffer
* @cd: pointer to command details structure or NULL
@@ -521,13 +493,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
}
/**
- * ice_sched_suspend_resume_elems - suspend or resume hw nodes
- * @hw: pointer to the hw struct
+ * ice_sched_suspend_resume_elems - suspend or resume HW nodes
+ * @hw: pointer to the HW struct
* @num_nodes: number of nodes
* @node_teids: array of node teids to be suspended or resumed
* @suspend: true means suspend / false means resume
*
- * This function suspends or resumes hw nodes
+ * This function suspends or resumes HW nodes
*/
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
@@ -561,10 +533,54 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}
/**
- * ice_sched_clear_agg - clears the agg related information
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_q_ctx *q_ctx;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ /* allocate LAN queue contexts */
+ if (!vsi_ctx->lan_q_ctx[tc]) {
+ vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+ new_numqs,
+ sizeof(*q_ctx),
+ GFP_KERNEL);
+ if (!vsi_ctx->lan_q_ctx[tc])
+ return ICE_ERR_NO_MEMORY;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ return 0;
+ }
+ /* the number of queues increased, update the queue contexts */
+ if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+ u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
+ return ICE_ERR_NO_MEMORY;
+ memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+ prev_num * sizeof(*q_ctx));
+ devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+ vsi_ctx->lan_q_ctx[tc] = q_ctx;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ }
+ return 0;
+}
+
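ice_alloc_lan_q_ctx() above only ever grows the per-TC queue-context array; ice_sched_update_vsi_child_nodes() further down calls it only when the new queue count exceeds the previous one. A minimal user-space sketch of the same grow-only pattern (plain malloc/memcpy/free standing in for devm_kcalloc/devm_kfree; names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct q_ctx {
	int q_handle;
	int q_teid;
};

/* Grow-only reallocation: never shrink, preserve existing entries. */
static int grow_q_ctx(struct q_ctx **arr, unsigned int *num, unsigned int new_num)
{
	struct q_ctx *tmp;

	if (new_num <= *num)		/* not growing: keep what we have */
		return 0;

	tmp = calloc(new_num, sizeof(*tmp));
	if (!tmp)
		return -1;
	if (*arr) {
		memcpy(tmp, *arr, *num * sizeof(*tmp));
		free(*arr);
	}
	*arr = tmp;
	*num = new_num;
	return 0;
}

int main(void)
{
	struct q_ctx *ctx = NULL;
	unsigned int num = 0;

	grow_q_ctx(&ctx, &num, 4);	/* first allocation */
	grow_q_ctx(&ctx, &num, 16);	/* grow: the 4 old entries survive */
	grow_q_ctx(&ctx, &num, 8);	/* no-op: smaller than current size */
	printf("%u entries\n", num);	/* prints "16 entries" */
	free(ctx);
	return 0;
}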
+/**
+ * ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
- * This function removes agg list and free up agg related memory
+ * This function removes the aggregator list and frees up aggregator related memory
* previously allocated.
*/
void ice_sched_clear_agg(struct ice_hw *hw)
@@ -622,7 +638,7 @@ void ice_sched_clear_port(struct ice_port_info *pi)
/**
* ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Cleanup scheduling elements from SW DB for all the ports
*/
@@ -646,16 +662,16 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
- * ice_sched_add_elems - add nodes to hw and SW DB
+ * ice_sched_add_elems - add nodes to HW and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
* @parent: pointer to the parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
- * @first_node_teid: if new nodes are added then return the teid of first node
+ * @first_node_teid: if new nodes are added then return the TEID of first node
*
- * This function add nodes to hw as well as to SW DB for a given layer
+ * This function adds nodes to HW as well as to SW DB for a given layer
*/
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
@@ -697,7 +713,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
if (status || num_groups_added != 1) {
- ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG;
}
@@ -748,7 +765,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* @parent: pointer to parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes to be added
- * @first_node_teid: pointer to the first node teid
+ * @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
 * This function adds nodes to a given layer.
@@ -800,7 +817,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added += num_added;
}
- /* Don't modify the first node teid memory if the first node was
+ /* Don't modify the first node TEID memory if the first node was
* added already in the above call. Instead send some temp
* memory for all other recursive calls.
*/
@@ -832,7 +849,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
/**
* ice_sched_get_qgrp_layer - get the current queue group layer number
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function returns the current queue group layer number
*/
@@ -844,7 +861,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
/**
* ice_sched_get_vsi_layer - get the current VSI layer number
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function returns the current VSI layer number
*/
@@ -855,7 +872,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 7 4
* 5 or less sw_entry_point_layer
*/
- /* calculate the vsi layer based on number of layers. */
+ /* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
@@ -973,7 +990,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
goto err_init_port;
}
- /* If the last node is a leaf node then the index of the Q group
+ /* If the last node is a leaf node then the index of the queue group
* layer is two less than the number of elements.
*/
if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
@@ -1082,7 +1099,7 @@ sched_query_out:
/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @base: pointer to the base node
* @node: pointer to the node to search
*
@@ -1114,13 +1131,13 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
- * ice_sched_get_free_qparent - Get a free lan or rdma q group node
+ * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: branch number
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
*
- * This function retrieves a free lan or rdma q group node
+ * This function retrieves a free LAN or RDMA queue group node
*/
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
@@ -1138,11 +1155,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_ctx)
return NULL;
vsi_node = vsi_ctx->sched.vsi_node[tc];
- /* validate invalid VSI id */
+ /* validate invalid VSI ID */
if (!vsi_node)
goto lan_q_exit;
- /* get the first q group node from VSI sub-tree */
+ /* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
@@ -1158,12 +1175,12 @@ lan_q_exit:
}
/**
- * ice_sched_get_vsi_node - Get a VSI node based on VSI id
- * @hw: pointer to the hw struct
+ * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
+ * @hw: pointer to the HW struct
* @tc_node: pointer to the TC node
* @vsi_handle: software VSI handle
*
- * This function retrieves a VSI node for a given VSI id from a given
+ * This function retrieves a VSI node for a given VSI ID from a given
* TC branch
*/
static struct ice_sched_node *
@@ -1188,7 +1205,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @num_qs: number of queues
* @num_nodes: num nodes array
*
@@ -1204,7 +1221,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- /* calculate num nodes from q group to VSI layer */
+ /* calculate num nodes from queue group to VSI layer */
for (i = qgl; i > vsil; i--) {
/* round to the next integer if there is a remainder */
num = DIV_ROUND_UP(num, hw->max_children[i]);
@@ -1220,10 +1237,10 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
- * @owner: node owner (lan or rdma)
+ * @owner: node owner (LAN or RDMA)
*
* This function adds the VSI child nodes to tree. It gets called for
- * lan and rdma separately.
+ * LAN and RDMA separately.
*/
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
@@ -1271,44 +1288,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
- * @pi: port information structure
- * @vsi_node: pointer to the VSI node
- * @num_nodes: pointer to the num nodes that needs to be removed per layer
- * @owner: node owner (lan or rdma)
- *
- * This function removes the VSI child nodes from the tree. It gets called for
- * lan and rdma separately.
- */
-static void
-ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
- struct ice_sched_node *vsi_node, u16 *num_nodes,
- u8 owner)
-{
- struct ice_sched_node *node, *next;
- u8 i, qgl, vsil;
- u16 num;
-
- qgl = ice_sched_get_qgrp_layer(pi->hw);
- vsil = ice_sched_get_vsi_layer(pi->hw);
-
- for (i = qgl; i > vsil; i--) {
- num = num_nodes[i];
- node = ice_sched_get_first_node(pi->hw, vsi_node, i);
- while (node && num) {
- next = node->sibling;
- if (node->owner == owner && !node->num_children) {
- ice_free_sched_node(pi, node);
- num--;
- }
- node = next;
- }
- }
-}
-
-/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1427,7 +1408,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
- /* add vsi supported nodes to tc subtree */
+ /* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
num_nodes);
}
@@ -1446,7 +1427,6 @@ static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
- u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
@@ -1454,7 +1434,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
- u8 i;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -1468,41 +1447,30 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;
- if (owner == ICE_SCHED_NODE_OWNER_LAN)
- prev_numqs = vsi_ctx->sched.max_lanq[tc];
- else
- return ICE_ERR_PARAM;
-
- /* num queues are not changed */
- if (prev_numqs == new_numqs)
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ /* the number of queues is unchanged or less than the previous number */
+ if (new_numqs <= prev_numqs)
+ return status;
+ status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
return status;
-
- /* calculate number of nodes based on prev/new number of qs */
- if (prev_numqs)
- ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-
- if (prev_numqs > new_numqs) {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
-
- ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
- owner);
- } else {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] -= prev_num_nodes[i];
-
- status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
- new_num_nodes, owner);
- if (status)
- return status;
- }
-
+ /* Always keep the tree sized for the maximum number of queues seen so
+ * far: update it only if the number of queues is greater than the
+ * previous number. This may leave some extra nodes in the tree if the
+ * number of queues later drops, but that does no harm. Removing those
+ * extra nodes would complicate the code if they are part of an SRL or
+ * are individually rate limited.
+ */
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes, owner);
+ if (status)
+ return status;
vsi_ctx->sched.max_lanq[tc] = new_numqs;
- return status;
+ return 0;
}
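Worked example of the comment above: if a VSI is first configured with 16 queues on a TC, later reconfigured to 4 and then back to 16, only the first call adds scheduler nodes. The shrink returns early because new_numqs <= prev_numqs and max_lanq stays at 16, so the later grow back to 16 is also a no-op; the tree simply stays sized for the largest queue count seen so far.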
/**
@@ -1511,7 +1479,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
* @enable: TC enabled or disabled
*
* This function adds/updates VSI nodes based on the number of queues. If TC is
@@ -1527,6 +1495,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
@@ -1535,7 +1504,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return ICE_ERR_PARAM;
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
- /* suspend the VSI if tc is not enabled */
+ /* suspend the VSI if TC is not enabled */
if (!enable) {
if (vsi_node && vsi_node->in_use) {
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
@@ -1586,7 +1555,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
- * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
+ * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
@@ -1646,8 +1615,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
- u8 i, j = 0;
+ u8 i;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
@@ -1655,8 +1625,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_ctx)
goto exit_sched_rm_vsi_cfg;
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
+ u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -1689,7 +1660,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_free_sched_node(pi, vsi_node);
vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up agg related vsi info if any */
+ /* clean up aggregator related VSI info if any */
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index bee8221ad146..3902a8ad3025 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -24,6 +24,10 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
+enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 683f48824a29..17afe6acb18a 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -12,6 +12,7 @@ enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
+ ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 09d1c314b68f..9f1f595ae7e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -19,7 +19,7 @@
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
* In case of VLAN filter first two bytes defines ether type (0x8100)
- * and remaining two bytes are placeholder for programming a given VLAN id
+ * and remaining two bytes are a placeholder for programming a given VLAN ID
* In case of Ether type filter it is treated as header without VLAN tag
* and byte 12 and 13 is used to program a given Ether type instead
*/
@@ -51,7 +51,7 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/**
* ice_aq_alloc_free_res - command to allocate/free resources
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
@@ -87,7 +87,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
@@ -163,7 +163,7 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
/**
* ice_aq_add_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@@ -206,7 +206,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_free_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
@@ -242,7 +242,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_update_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@@ -279,7 +279,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_is_vsi_valid - check whether the VSI is valid or not
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* check whether the VSI is valid or not
@@ -290,11 +290,11 @@ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
}
/**
- * ice_get_hw_vsi_num - return the hw VSI number
- * @hw: pointer to the hw struct
+ * ice_get_hw_vsi_num - return the HW VSI number
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
- * return the hw VSI number
+ * return the HW VSI number
* Caution: call this function only if VSI is valid (ice_is_vsi_valid)
*/
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
@@ -304,7 +304,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* return the VSI context entry for a given VSI handle
@@ -316,21 +316,42 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_save_vsi_ctx - save the VSI context for a given VSI handle
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
* @vsi: VSI context pointer
*
* save the VSI context entry for a given VSI handle
*/
-static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
- struct ice_vsi_ctx *vsi)
+static void
+ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
hw->vsi_ctx[vsi_handle] = vsi;
}
/**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ u8 i;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return;
+ ice_for_each_traffic_class(i) {
+ if (vsi->lan_q_ctx[i]) {
+ devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+ vsi->lan_q_ctx[i] = NULL;
+ }
+ }
+}
+
+/**
* ice_clear_vsi_ctx - clear the VSI context entry
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* clear the VSI context entry
@@ -341,6 +362,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
vsi = ice_get_vsi_ctx(hw, vsi_handle);
if (vsi) {
+ ice_clear_vsi_q_ctx(hw, vsi_handle);
devm_kfree(ice_hw_to_dev(hw), vsi);
hw->vsi_ctx[vsi_handle] = NULL;
}
@@ -348,7 +370,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_clear_all_vsi_ctx - clear all the VSI context entries
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
@@ -360,7 +382,7 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@@ -383,7 +405,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
return status;
tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!tmp_vsi_ctx) {
- /* Create a new vsi context */
+ /* Create a new VSI context */
tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
@@ -398,12 +420,12 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
- return status;
+ return 0;
}
/**
* ice_free_vsi- free VSI context from hardware and VSI handle list
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
@@ -428,7 +450,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_update_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@@ -447,8 +469,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_alloc_free_vsi_list
- * @hw: pointer to the hw struct
- * @vsi_list_id: VSI list id returned or used for lookup
+ * @hw: pointer to the HW struct
+ * @vsi_list_id: VSI list ID returned or used for lookup
* @lkup_type: switch rule filter lookup type
* @opc: switch rules population command type - pass in the command opcode
*
@@ -504,7 +526,7 @@ ice_aq_alloc_free_vsi_list_exit:
/**
* ice_aq_sw_rules - add/update/remove switch rules
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @rule_list: pointer to switch rule population list
* @rule_list_sz: total size of the rule list in bytes
* @num_rules: number of switch rules in the rule_list
@@ -643,21 +665,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
- fi->lb_en = true;
- /* Do not set lan_en to TRUE if
+ /* Setting LB for prune actions will result in replicated packets
+ * being sent to the internal switch, where they will be dropped.
+ */
+ if (fi->lkup_type != ICE_SW_LKUP_VLAN)
+ fi->lb_en = true;
+
+ /* Set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
- * 2.1 The lookup is MAC with unicast addr for MAC, OR
- * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ * 2.1 The lookup is a directional lookup like ethertype,
+ * promiscuous, ethertype-MAC, promiscuous-VLAN
+ * and default-port OR
+ * 2.2 The lookup is VLAN, OR
+ * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
+ * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
+ *
+ * OR
+ *
+ * The switch is a VEPA.
*
- * In all other cases, the LAN enable has to be set to true.
+ * In all other cases, the LAN enable has to be set to false.
*/
- if (!(hw->evb_veb &&
- ((fi->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
- (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
- is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ if (hw->evb_veb) {
+ if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC ||
+ fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ fi->lkup_type == ICE_SW_LKUP_DFLT ||
+ fi->lkup_type == ICE_SW_LKUP_VLAN ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
+ fi->lan_en = true;
+ } else {
fi->lan_en = true;
+ }
}
}
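In concrete terms, after this change a unicast MAC (or MAC/VLAN) filter on a VEB is the only case that leaves lan_en false; directional lookups (ethertype, promiscuous, ethertype-MAC, promiscuous-VLAN, default-port), VLAN lookups, multicast/broadcast MAC lookups, and any lookup on a VEPA all set lan_en true. VLAN lookups additionally skip lb_en since, per the new comment, looped-back copies of pruned packets would just be dropped at the internal switch.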
@@ -799,7 +843,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* @hw: pointer to the hardware structure
* @m_ent: the management entry for which sw marker needs to be added
* @sw_marker: sw marker to tag the Rx descriptor with
- * @l_id: large action resource id
+ * @l_id: large action resource ID
*
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
@@ -811,8 +855,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
- * 2. GENERIC VALUE action to hold the profile id
- * 3. GENERIC VALUE action to hold the software marker id
+ * 2. GENERIC VALUE action to hold the profile ID
+ * 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
enum ice_status status;
@@ -875,13 +919,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
- /* Update the action to point to the large action id */
+ /* Update the action to point to the large action ID */
rx_tx->pdata.lkup_tx_rx.act =
cpu_to_le32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));
- /* Use the filter rule id of the previously created rule with single
+ /* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
@@ -904,10 +948,10 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to set in the VSI mapping
* @num_vsi: number of VSI handles in the array
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
*
- * Helper function to create a new entry of VSI list id to VSI mapping
- * using the given VSI list id
+ * Helper function to create a new entry of VSI list ID to VSI mapping
+ * using the given VSI list ID
*/
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -935,13 +979,13 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
* @lkup_type: lookup type of the filter
*
* Call AQ command to add a new switch rule or update existing switch rule
- * using the given VSI list id
+ * using the given VSI list ID
*/
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -998,7 +1042,7 @@ exit:
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
@@ -1092,7 +1136,7 @@ ice_create_pkt_fwd_rule_exit:
* @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
- * VSI list id
+ * VSI list ID
*/
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
@@ -1119,7 +1163,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
/**
* ice_update_sw_rule_bridge_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
@@ -1174,7 +1218,7 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
* Update the previously created switch rule with the
- * newly created VSI list id
+ * newly created VSI list ID
* if a VSI list was previously created
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
@@ -1255,7 +1299,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
return 0;
/* Update the previously created VSI list set with
- * the new VSI id passed in
+ * the new VSI ID passed in
*/
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
@@ -1263,7 +1307,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
vsi_list_id, false, opcode,
new_fltr->lkup_type);
- /* update VSI list mapping info with new VSI id */
+ /* update VSI list mapping info with new VSI ID */
if (!status)
set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
@@ -1305,7 +1349,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* @hw: pointer to the hardware structure
* @recp_id: lookup type for which VSI lists needs to be searched
* @vsi_handle: VSI handle to be found in VSI list
- * @vsi_list_id: VSI list id found containing vsi_handle
+ * @vsi_list_id: VSI list ID found containing vsi_handle
*
* Helper function to search a VSI list with single entry containing given VSI
* handle element. This can be extended further to search VSI list with more
@@ -1336,7 +1380,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
/**
* ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
- * @recp_id: lookup type (recipe id) for which rule has to be added
+ * @recp_id: lookup type (recipe ID) for which rule has to be added
* @f_entry: structure containing MAC forwarding information
*
* Adds or updates the rule lists for a given recipe
@@ -1381,7 +1425,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
/**
* ice_remove_vsi_list_rule
* @hw: pointer to the hardware structure
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
* @lkup_type: switch rule filter lookup type
*
* The VSI list should be emptied before this function is called to remove the
@@ -1506,7 +1550,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_remove_rule_internal - Remove a filter rule of a given type
* @hw: pointer to the hardware structure
- * @recp_id: recipe id for which the rule needs to removed
+ * @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
static enum ice_status
@@ -1556,7 +1600,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
goto exit;
- /* if vsi count goes to zero after updating the vsi list */
+ /* if VSI count goes to zero after updating the VSI list */
if (list_elem->vsi_count == 0)
remove_rule = true;
}
@@ -1634,7 +1678,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
m_list_itr->fltr_info.src = hw_vsi_id;
@@ -1710,7 +1754,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
((u8 *)r_iter + (elem_sent * s_rule_size));
}
- /* Fill up rule id based on the value returned from FW */
+ /* Fill up rule ID based on the value returned from FW */
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
@@ -1770,7 +1814,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
- /* VLAN id should only be 12 bits */
+ /* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
@@ -1828,7 +1872,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
}
}
} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
- /* Update existing VSI list to add new VSI id only if it used
+ /* Update existing VSI list to add new VSI ID only if it used
* by one VLAN rule.
*/
cur_fltr = &v_list_itr->fltr_info;
@@ -1838,7 +1882,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* If VLAN rule exists and VSI list being used by this rule is
* referenced by more than 1 VLAN rule. Then create a new VSI
* list appending previous VSI with new VSI and update existing
- * VLAN rule to point to new VSI list id
+ * VLAN rule to point to new VSI list ID
*/
struct ice_fltr_info tmp_fltr;
u16 vsi_handle_arr[2];
@@ -1926,6 +1970,65 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
+ * ice_add_eth_mac - Add ethertype and MAC based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype/MAC filters, MAC is optional
+ */
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+ struct ice_fltr_list_entry *em_list_itr;
+
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(em_list_itr, em_list, list_entry) {
+ enum ice_sw_lkup_type l_type =
+ em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ em_list_itr->status = ice_add_rule_internal(hw, l_type,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return 0;
+}
+
+/**
+ * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype or ethertype MAC entries
+ */
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+ struct ice_fltr_list_entry *em_list_itr, *tmp;
+
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
+ enum ice_sw_lkup_type l_type =
+ em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->status = ice_remove_rule_internal(hw, l_type,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return 0;
+}
+
+/**
* ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
* @rule_head: pointer to the switch list structure that we want to delete
@@ -2170,7 +2273,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = 0;
- /* check to make sure VSI id is valid and within boundary */
+ /* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -2190,6 +2293,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
}
/**
+ * ice_determine_promisc_mask
+ * @fi: filter info to parse
+ *
+ * Helper function to determine which ICE_PROMISC_ mask corresponds
+ * to the given filter info.
+ */
+static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
+{
+ u16 vid = fi->l_data.mac_vlan.vlan_id;
+ u8 *macaddr = fi->l_data.mac.mac_addr;
+ bool is_tx_fltr = false;
+ u8 promisc_mask = 0;
+
+ if (fi->flag == ICE_FLTR_TX)
+ is_tx_fltr = true;
+
+ if (is_broadcast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
+ else if (is_multicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
+ else if (is_unicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
+ if (vid)
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
+
+ return promisc_mask;
+}
+
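For example, a Tx-direction filter whose MAC address is multicast and whose VLAN ID is non-zero maps to ICE_PROMISC_MCAST_TX | ICE_PROMISC_VLAN_TX; the same filter in the Rx direction yields the corresponding _RX bits.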
+/**
+ * ice_remove_promisc - Remove promisc based filter rules
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe ID for which the rule needs to be removed
+ * @v_list: list of promisc entries
+ */
+static enum ice_status
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
+ struct list_head *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
+
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
+ v_list_itr->status =
+ ice_remove_rule_internal(hw, recp_id, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return 0;
+}
+
+/**
+ * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *fm_entry, *tmp;
+ struct list_head remove_list_head;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (vid)
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ else
+ recipe_id = ICE_SW_LKUP_PROMISC;
+
+ rule_head = &sw->recp_list[recipe_id].filt_rules;
+ rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
+
+ INIT_LIST_HEAD(&remove_list_head);
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(itr, rule_head, list_entry) {
+ u8 fltr_promisc_mask = 0;
+
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+
+ fltr_promisc_mask |=
+ ice_determine_promisc_mask(&itr->fltr_info);
+
+ /* Skip if filter is not completely specified by given mask */
+ if (fltr_promisc_mask & ~promisc_mask)
+ continue;
+
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ &remove_list_head,
+ &itr->fltr_info);
+ if (status) {
+ mutex_unlock(rule_lock);
+ goto free_fltr_list;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
+
+free_fltr_list:
+ list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
+ list_del(&fm_entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fm_entry);
+ }
+
+ return status;
+}
+
+/**
+ * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
+{
+ enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_fltr_info new_fltr;
+ enum ice_status status = 0;
+ bool is_tx_fltr;
+ u16 hw_vsi_id;
+ int pkt_type;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ memset(&new_fltr, 0, sizeof(new_fltr));
+
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
+ new_fltr.l_data.mac_vlan.vlan_id = vid;
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ } else {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
+ recipe_id = ICE_SW_LKUP_PROMISC;
+ }
+
+ /* Separate filters must be set for each direction/packet type
+ * combination, so we will loop over the mask value, store the
+ * individual type, and clear it out in the input mask as it
+ * is found.
+ */
+ while (promisc_mask) {
+ u8 *mac_addr;
+
+ pkt_type = 0;
+ is_tx_fltr = false;
+
+ if (promisc_mask & ICE_PROMISC_UCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_RX;
+ pkt_type = UCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_TX;
+ pkt_type = UCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_RX;
+ pkt_type = MCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_TX;
+ pkt_type = MCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_RX;
+ pkt_type = BCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_TX;
+ pkt_type = BCAST_FLTR;
+ is_tx_fltr = true;
+ }
+
+ /* Check for VLAN promiscuous flag */
+ if (promisc_mask & ICE_PROMISC_VLAN_RX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_RX;
+ } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_TX;
+ is_tx_fltr = true;
+ }
+
+ /* Set filter DA based on packet type */
+ mac_addr = new_fltr.l_data.mac.mac_addr;
+ if (pkt_type == BCAST_FLTR) {
+ eth_broadcast_addr(mac_addr);
+ } else if (pkt_type == MCAST_FLTR ||
+ pkt_type == UCAST_FLTR) {
+ /* Use the dummy ether header DA */
+ ether_addr_copy(mac_addr, dummy_eth_header);
+ if (pkt_type == MCAST_FLTR)
+ mac_addr[0] |= 0x1; /* Set multicast bit */
+ }
+
+ /* Need to reset this to zero for all iterations */
+ new_fltr.flag = 0;
+ if (is_tx_fltr) {
+ new_fltr.flag |= ICE_FLTR_TX;
+ new_fltr.src = hw_vsi_id;
+ } else {
+ new_fltr.flag |= ICE_FLTR_RX;
+ new_fltr.src = hw->port_info->lport;
+ }
+
+ new_fltr.fltr_act = ICE_FWD_TO_VSI;
+ new_fltr.vsi_handle = vsi_handle;
+ new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_list_entry.fltr_info = new_fltr;
+
+ status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
+ if (status)
+ goto set_promisc_exit;
+ }
+
+set_promisc_exit:
+ return status;
+}
+
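For reference, a caller builds @promisc_mask from the ICE_PROMISC_* bits added to ice_switch.h later in this patch, and passes a VLAN ID of 0 when no VLAN match is wanted. Below is a minimal sketch of such a caller, assuming the driver's headers are available; the helper name is hypothetical and not part of the patch:

/* Hypothetical helper: toggle Rx unicast + multicast promiscuous mode on a
 * VSI. With no VLAN bits in the mask and a VLAN ID of 0, both calls operate
 * on the plain ICE_SW_LKUP_PROMISC recipe.
 */
static enum ice_status
example_toggle_rx_promisc(struct ice_hw *hw, u16 vsi_handle, bool ena)
{
	u8 mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;

	if (ena)
		return ice_set_vsi_promisc(hw, vsi_handle, mask, 0);
	return ice_clear_vsi_promisc(hw, vsi_handle, mask, 0);
}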
+/**
+ * ice_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @rm_vlan_promisc: true to clear VLAN promiscuous mode, false to set it
+ *
+ * Configure VSI with all associated VLANs to given promiscuous mode(s)
+ */
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *list_itr, *tmp;
+ struct list_head vsi_list_head;
+ struct list_head *vlan_head;
+ struct mutex *vlan_lock; /* Lock to protect filter rule list */
+ enum ice_status status;
+ u16 vlan_id;
+
+ INIT_LIST_HEAD(&vsi_list_head);
+ vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ mutex_lock(vlan_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
+ &vsi_list_head);
+ mutex_unlock(vlan_lock);
+ if (status)
+ goto free_fltr_list;
+
+ list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
+ if (rm_vlan_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ if (status)
+ break;
+ }
+
+free_fltr_list:
+ list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
+ list_del(&list_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_itr);
+ }
+ return status;
+}
+
+/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -2224,12 +2612,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
case ICE_SW_LKUP_VLAN:
ice_remove_vlan(hw, &remove_list_head);
break;
+ case ICE_SW_LKUP_PROMISC:
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ ice_remove_promisc(hw, lkup, &remove_list_head);
+ break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
- case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_DFLT:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_LAST:
default:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
@@ -2263,7 +2653,7 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
- * @recp_id: Recipe id for which rules need to be replayed
+ * @recp_id: Recipe ID for which rules need to be replayed
* @list_head: list for which filters need to be replayed
*
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
@@ -2287,7 +2677,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
itr->fltr_info.vsi_handle == vsi_handle) {
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_id, &f_entry);
@@ -2302,7 +2692,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
if (recp_id == ICE_SW_LKUP_VLAN)
@@ -2342,7 +2732,7 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_rm_all_sw_replay_rule_info - deletes filter replay rules
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Deletes the filter replay rules.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d5ef0bd58bf9..732b0b9b2e15 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -9,6 +9,12 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_VSI_INVAL_ID 0xffff
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI queue context structure */
+struct ice_q_ctx {
+ u16 q_handle;
+};
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
@@ -20,6 +27,8 @@ struct ice_vsi_ctx {
struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
u8 vf_num;
+ u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_sw_fwd_act_type {
@@ -44,7 +53,7 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_LAST
};
-/* type of filter src id */
+/* type of filter src ID */
enum ice_src_id {
ICE_SRC_ID_UNKNOWN = 0,
ICE_SRC_ID_VSI,
@@ -95,8 +104,8 @@ struct ice_fltr_info {
/* Depending on filter action */
union {
- /* queue id in case of ICE_FWD_TO_Q and starting
- * queue id in case of ICE_FWD_TO_QGRP.
+ /* queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 hw_vsi_id:10;
@@ -143,7 +152,7 @@ struct ice_sw_recipe {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
};
-/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
+/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
@@ -165,7 +174,7 @@ struct ice_fltr_list_entry {
* used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
- /* back pointer to VSI list id to VSI list mapping */
+ /* back pointer to VSI list ID to VSI list mapping */
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
@@ -178,6 +187,17 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+enum ice_promisc_flags {
+ ICE_PROMISC_UCAST_RX = 0x1,
+ ICE_PROMISC_UCAST_TX = 0x2,
+ ICE_PROMISC_MCAST_RX = 0x4,
+ ICE_PROMISC_MCAST_TX = 0x8,
+ ICE_PROMISC_BCAST_RX = 0x10,
+ ICE_PROMISC_BCAST_TX = 0x20,
+ ICE_PROMISC_VLAN_RX = 0x40,
+ ICE_PROMISC_VLAN_TX = 0x80,
+};
+
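These bit values also drive the mask test in ice_clear_vsi_promisc() earlier in this patch: a filter is only removed when every direction/type bit it represents is covered by the caller's mask. A small worked illustration (values only, not part of the patch):

/* Illustration: an Rx multicast promiscuous filter survives a request that
 * only clears Rx unicast, but is removed when both bits are requested.
 *
 *   fltr = ICE_PROMISC_MCAST_RX                        = 0x04
 *   req  = ICE_PROMISC_UCAST_RX                        = 0x01
 *   fltr & ~req                                        = 0x04 -> skip (keep)
 *
 *   req  = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX = 0x05
 *   fltr & ~req                                        = 0x00 -> remove
 */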
/* VSI related commands */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
@@ -198,11 +218,27 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+
+/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc);
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 1af21bbe180e..2364eaf33d23 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
+#include "ice_dcb_lib.h"
#define ICE_RX_HDR_SIZE 256
@@ -100,8 +101,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
- int napi_budget)
+static bool
+ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = vsi->work_lmt;
@@ -236,9 +237,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
+ /* round up to nearest page */
tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- 4096);
+ PAGE_SIZE);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
@@ -282,8 +283,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_buf->page)
continue;
- dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_buf->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(dev, rx_buf->dma,
+ rx_buf->page_offset,
+ ICE_RXBUF_2048, DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
rx_buf->page = NULL;
rx_buf->page_offset = 0;
@@ -339,9 +349,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
+ /* round up to nearest page */
+ rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
@@ -389,8 +399,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
* Returns true if the page was successfully allocated or
* reused.
*/
-static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *bi)
+static bool
+ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -409,7 +419,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -423,6 +434,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -444,7 +457,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!rx_ring->netdev || !cleaned_count)
return false;
- /* get the RX descriptor and buffer based on next_to_use */
+ /* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_buf[ntu];
@@ -452,6 +465,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!ice_alloc_mapped_page(rx_ring, bi))
goto no_bufs;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ ICE_RXBUF_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -497,61 +516,43 @@ static bool ice_page_is_reserved(struct page *page)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_buf: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buf to place the data into
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * Update the offset within page so that Rx buf will be ready to be reused.
+ * For systems with PAGE_SIZE < 8192 this function will flip the page offset
+ * so the second half of page assigned to Rx buffer will be used, otherwise
+ * the offset is moved by the @size bytes
*/
-static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ICE_RXBUF_2048;
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= size;
#else
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
- unsigned int truesize;
-#endif /* PAGE_SIZE < 8192) */
-
- struct page *page;
- unsigned int size;
-
- size = le16_to_cpu(rx_desc->wb.pkt_len) &
- ICE_RX_FLX_DESC_PKT_LEN_M;
-
- page = rx_buf->page;
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += size;
+#endif
+}
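To make the PAGE_SIZE < 8192 branch concrete, here is a tiny sketch assuming 4 KiB pages and ICE_RXBUF_2048 (2 KiB) buffers; the XOR simply ping-pongs between the two half-page buffers:

unsigned int offset = 0;	/* first half of the page */

offset ^= 2048;			/* 2048: second half of the page */
offset ^= 2048;			/* back to 0: first half again */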
+/**
+ * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
+ * @rx_buf: buffer containing the page
+ *
+ * If page is reusable, we have a green light for calling ice_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed
+ */
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+{
#if (PAGE_SIZE >= 8192)
- truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif /* PAGE_SIZE >= 8192) */
-
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buf->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ice_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buf->page_offset, size, truesize);
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
+ unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
+ struct page *page = rx_buf->page;
/* avoid re-using remote pages */
if (unlikely(ice_page_is_reserved(page)))
@@ -559,36 +560,61 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += truesize;
-
if (rx_buf->page_offset > last_offset)
return false;
#endif /* PAGE_SIZE < 8192) */
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- get_page(rx_buf->page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buf->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
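A short, illustrative walk-through of the pagecnt_bias accounting used by ice_alloc_mapped_page() and the check above; the numbers only trace reference counts and are not part of the patch:

/* After ice_alloc_mapped_page():
 *   page_count = 1 + (USHRT_MAX - 1) = USHRT_MAX, pagecnt_bias = USHRT_MAX
 *   page_count - pagecnt_bias = 0      -> nobody else references the page
 *
 * After one buffer is handed to the stack (ice_get_rx_buf() drops the bias):
 *   page_count = USHRT_MAX, pagecnt_bias = USHRT_MAX - 1
 *   page_count - pagecnt_bias = 1      -> only the in-flight skb holds it,
 *                                         so the other half can be reused
 *
 * When that skb is freed, put_page() drops page_count and the difference
 * returns to 0. Once pagecnt_bias drains down to 1, the block above
 * restocks the driver's share with page_ref_add(page, USHRT_MAX - 1).
 */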
/**
+ * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_buf: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ * The function will then update the page offset.
+ */
+static void
+ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->page_offset, size, truesize);
+
+ /* page is being used so we must update the page offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+}
+
+/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
*/
-static void ice_reuse_rx_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *old_buf)
+static void
+ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -599,121 +625,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buf = *old_buf;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and unnecessary copy of skb.
+ */
+ new_buf->dma = old_buf->dma;
+ new_buf->page = old_buf->page;
+ new_buf->page_offset = old_buf->page_offset;
+ new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
/**
- * ice_fetch_rx_buf - Allocate skb and populate it
+ * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @skb: skb to be used
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
*/
-static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
+static struct ice_rx_buf *
+ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
+ const unsigned int size)
{
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
- struct page *page;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- page = rx_buf->page;
- prefetchw(page);
+ prefetchw(rx_buf->page);
+ *skb = rx_buf->skb;
- skb = rx_buf->skb;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
+ rx_buf->page_offset, size,
+ DMA_FROM_DEVICE);
- if (likely(!skb)) {
- u8 *page_addr = page_address(page) + rx_buf->page_offset;
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buf->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ return rx_buf;
+}
+
+/**
+ * ice_construct_skb - Allocate skb and populate it
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *
+ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ unsigned int size)
+{
+ void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch((void *)(page_addr + L1_CACHE_BYTES));
+ prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buf_failed++;
- return NULL;
- }
-
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- skb_record_rx_queue(skb, rx_ring->q_index);
- } else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset,
- ICE_RXBUF_2048,
- DMA_FROM_DEVICE);
+ skb_record_rx_queue(skb, rx_ring->q_index);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > ICE_RX_HDR_SIZE)
+ headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
- rx_buf->skb = NULL;
- }
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
- /* pull page into skb */
- if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
+ /* if we exhaust the linear part then add what is left as a frag */
+ size -= headlen;
+ if (size) {
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+ skb_add_rx_frag(skb, 0, rx_buf->page,
+ rx_buf->page_offset + headlen, size, truesize);
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ /* buffer is unused, reset bias back to rx_buf; data was copied
+ * onto skb's linear part so there's no need for adjusting
+ * page offset and we can reuse this buffer as-is
+ */
+ rx_buf->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
-
return skb;
}
/**
- * ice_pull_tail - ice specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
*
- * This function is an ice specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
*/
-static void ice_pull_tail(struct sk_buff *skb)
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int pull_len;
- unsigned char *va;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+ /* hand second half of page back to the ring */
+ if (ice_can_reuse_rx_page(rx_buf)) {
+ ice_reuse_rx_page(rx_ring, rx_buf);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ }
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
+ /* clear contents of buffer_info */
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
}
/**
@@ -730,10 +767,6 @@ static void ice_pull_tail(struct sk_buff *skb)
*/
static bool ice_cleanup_headers(struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ice_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -751,8 +784,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
-static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
- const u16 stat_err_bits)
+static bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
return !!(rx_desc->wb.status_error0 &
cpu_to_le16(stat_err_bits));
@@ -769,9 +802,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
*/
-static bool ice_is_non_eop(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static bool
+ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -838,8 +871,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* skb->protocol must be set before this function is called
*/
-static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+static void
+ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
@@ -909,9 +943,10 @@ checksum_fail:
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
+static void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
@@ -925,18 +960,17 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: vlan tag for packet
+ * @vlan_tag: VLAN tag for packet
*
* This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without vlan tag)
+ * gro receive functions (with/without VLAN tag)
*/
-static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
- u16 vlan_tag)
+static void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK)) {
+ (vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- }
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -958,10 +992,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
bool failure = false;
- /* start the loop to process RX packets bounded by 'budget' */
+ /* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
+ struct ice_rx_buf *rx_buf;
struct sk_buff *skb;
+ unsigned int size;
u16 stat_err_bits;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -973,7 +1009,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
cleaned_count = 0;
}
- /* get the RX desc from RX ring based on 'next_to_clean' */
+ /* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors
@@ -991,11 +1027,24 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
/* allocate (if needed) and populate skb */
- skb = ice_fetch_rx_buf(rx_ring, rx_desc);
- if (!skb)
+ if (skb)
+ ice_add_rx_frag(rx_buf, skb, size);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ rx_buf->pagecnt_bias++;
break;
+ }
+ ice_put_rx_buf(rx_ring, rx_buf);
cleaned_count++;
/* skip if it is NOP desc */
@@ -1049,17 +1098,247 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
}
/**
+ * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
+ * @port_info: port_info structure containing the current link speed
+ * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
+ * @itr: itr value to update
+ *
+ * Calculate how big of an increment should be applied to the ITR value passed
+ * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * link speed.
+ *
+ * The following is a calculation derived from:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to:
+ *
+ *        wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
+ * ITR = --------------------------------------------- * --------------
+ *                           rate                        pkt_size + 640
+ */
+static unsigned int
+ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
+ unsigned int avg_pkt_size,
+ unsigned int itr)
+{
+ switch (port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ /* fall through */
+ default:
+ itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ }
+
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+
+ return itr;
+}
+
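Working the formula through for the 10GB branch may help: the per-speed constants (17, 34, 43, 68, 85, 170) are wmem_default * bits_per_byte * usecs_per_sec divided by the link rate, so 10 Gb/s gives 212992 * 8 * 1000000 / 10^10, roughly 170. A rough sketch of the resulting increments (illustrative values, not part of the patch):

/* Increment produced by the ICE_AQ_LINK_SPEED_10GB branch above */
unsigned int inc;

/* 1500-byte packets: 170 * (1500 + 24) / (1500 + 640) rounds up to 122 */
inc = DIV_ROUND_UP(170 * (1500 + 24), 1500 + 640);

/* 64-byte packets get a much smaller increment: 170 * 88 / 704 rounds up to 22 */
inc = DIV_ROUND_UP(170 * (64 + 24), 64 + 640);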
+/**
+ * ice_update_itr - update the adaptive ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ */
+static void
+ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
+{
+ unsigned long next_update = jiffies;
+ unsigned int packets, bytes, itr;
+ bool container_is_rx;
+
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
+ return;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ container_is_rx = (&q_vector->rx == rc);
+ /* For Rx we want to push the delay up and default to low latency;
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = container_is_rx ?
+ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
+ ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
+
+ /* If we didn't update within up to 1 - 2 jiffies we can assume
+ * that either packets are coming in so slow there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ packets = rc->total_pkts;
+ bytes = rc->total_bytes;
+
+ if (container_is_rx) {
+ /* If this is Rx and there are 1 to 4 packets and fewer than
+ * 9000 bytes, assume insufficient data to use bulk rate limiting
+ * approach unless Tx is already in bulk rate limiting. We
+ * are likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
+ itr = ICE_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size_and_speed;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & ICE_ITR_MASK) ==
+ ICE_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
+ }
+
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 56) {
+ itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= ICE_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+ /* If packet count is 128 or greater we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr >>= 1;
+ itr &= ICE_ITR_MASK;
+ if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
+ itr = ICE_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = ICE_ITR_ADAPTIVE_BULK;
+
+adjust_by_size_and_speed:
+
+ /* based on checks above packets cannot be 0 so division is safe */
+ itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
+ bytes / packets, itr);
+
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
+ rc->total_bytes = 0;
+ rc->total_pkts = 0;
+}
+
+/**
* ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
* @itr_idx: interrupt throttling index
- * @reg_itr: interrupt throttling value adjusted based on ITR granularity
+ * @itr: interrupt throttling value in usecs
*/
-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
+ /* The itr value is reported in microseconds, and the register value is
+ * recorded in 2 microsecond units. For this reason we only need to
+ * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
+ * granularity as a shift instead of division. The mask makes sure the
+ * ITR value is never odd so we don't accidentally write into the field
+ * prior to the ITR field.
+ */
+ itr &= ICE_ITR_MASK;
+
return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
- (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
+ (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
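The single shift works because the GLINT_DYN_CTL interval field counts 2-microsecond units: shifting by the difference of the two shift amounts both divides the microsecond value by the ITR granularity and places it in the field. A small sketch of the equivalence, assuming only the driver's existing defines:

u16 itr = 50;	/* example: 50 usecs requested */
u32 a, b;

/* what ice_buildreg_itr() computes for the interval field */
a = (u32)(itr & ICE_ITR_MASK) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S);

/* equivalent two-step form: usecs -> 2-usec units, then into the field */
b = ((u32)(itr & ICE_ITR_MASK) >> ICE_ITR_GRAN_S) << GLINT_DYN_CTL_INTERVAL_S;

/* a == b because ICE_ITR_MASK clears the bits below ICE_ITR_GRAN_S */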
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
/**
* ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
* @vsi: the VSI associated with the q_vector
@@ -1068,10 +1347,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_ring_container *rc;
+ struct ice_ring_container *tx = &q_vector->tx;
+ struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* This will do nothing if dynamic updates are not enabled */
+ ice_update_itr(q_vector, tx);
+ ice_update_itr(q_vector, rx);
+
/* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a
* pseudo-lazy update with the following criteria.
@@ -1080,35 +1363,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
* 2. If we must reduce an ITR that is given highest priority.
* 3. We then give priority to increasing ITR based on amount.
*/
- if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
- rc = &q_vector->rx;
+ if (rx->target_itr < rx->current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
- ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
- (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
- rc = &q_vector->tx;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((tx->target_itr < tx->current_itr) ||
+ ((rx->target_itr - rx->current_itr) <
+ (tx->target_itr - tx->current_itr))) {
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
- rc = &q_vector->rx;
+ itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
+ tx->current_itr = tx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (rx->current_itr != rx->target_itr) {
/* Rx ITR needs to be increased, third priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* Still have to re-enable the interrupts */
itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
}
- if (!test_bit(__ICE_DOWN, vsi->state)) {
- int vector = vsi->hw_base_vector + q_vector->v_idx;
-
- wr32(hw, GLINT_DYN_CTL(vector), itr_val);
- }
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(q_vector->reg_idx),
+ itr_val);
}
/**
@@ -1354,7 +1638,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
@@ -1475,7 +1759,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/**
- * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
*
@@ -1501,7 +1785,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
* to the encapsulated ethertype.
*/
skb->protocol = vlan_get_protocol(skb);
- goto out;
+ return 0;
}
/* if we have a HW VLAN tag being added, default to the HW one */
@@ -1523,8 +1807,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
-out:
- return 0;
+ return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
/**
@@ -1561,6 +1844,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
+ /* cppcheck-suppress unreadVariable */
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fc358ea81816..66e05032ee56 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -45,8 +45,13 @@
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
+#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
+#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
struct sk_buff *skb;
@@ -73,6 +78,7 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -124,11 +130,19 @@ enum ice_rx_dtype {
#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
-#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
+#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
+#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
+#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
+#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
+#define ICE_ITR_ADAPTIVE_BULK 0x0000
+
#define ICE_DFLT_INTRL 0
+#define ICE_MAX_INTRL 236
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
@@ -149,6 +163,9 @@ struct ice_ring {
};
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
+#ifdef CONFIG_DCB
+ u8 dcb_tc; /* Traffic class of ring */
+#endif /* CONFIG_DCB */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -173,21 +190,13 @@ struct ice_ring {
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
-enum ice_latency_range {
- ICE_LOWEST_LATENCY = 0,
- ICE_LOW_LATENCY = 1,
- ICE_BULK_LATENCY = 2,
- ICE_ULTRA_LATENCY = 3,
-};
-
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
- enum ice_latency_range latency_range;
- int itr_idx; /* index in the interrupt vector */
+ u16 itr_idx; /* index in the interrupt vector */
u16 target_itr; /* value in usecs divided by the hw->itr_gran */
u16 current_itr; /* value in usecs divided by the hw->itr_gran */
/* high bit set means dynamic ITR, rest is used to store user
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 17086d5b5c33..a862af4cbf78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
+#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
@@ -106,7 +107,7 @@ struct ice_link_status {
};
/* Different reset sources for which a disable queue AQ call has to be made in
- * order to clean the TX scheduler as a part of the reset
+ * order to clean the Tx scheduler as a part of the reset
*/
enum ice_disq_rst_src {
ICE_NO_RESET = 0,
@@ -128,11 +129,11 @@ struct ice_phy_info {
struct ice_hw_common_caps {
u32 valid_functions;
- /* TX/RX queues */
- u16 num_rxq; /* Number/Total RX queues */
- u16 rxq_first_id; /* First queue ID for RX queues */
- u16 num_txq; /* Number/Total TX queues */
- u16 txq_first_id; /* First queue ID for TX queues */
+ /* Tx/Rx queues */
+ u16 num_rxq; /* Number/Total Rx queues */
+ u16 rxq_first_id; /* First queue ID for Rx queues */
+ u16 num_txq; /* Number/Total Tx queues */
+ u16 txq_first_id; /* First queue ID for Tx queues */
/* MSI-X vectors */
u16 num_msix_vectors;
@@ -147,6 +148,8 @@ struct ice_hw_common_caps {
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
+
+ u8 dcb;
};
/* Function specific capabilities */
@@ -209,12 +212,17 @@ struct ice_nvm_info {
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
+#define ice_for_each_traffic_class(_i) \
+ for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+
+#define ICE_INVAL_TEID 0xFFFFFFFF
+
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
- u32 agg_id; /* aggregator group id */
+ u32 agg_id; /* aggregator group ID */
u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
@@ -241,13 +249,12 @@ enum ice_agg_type {
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_DFLT_BW_WT 1
-/* vsi type list entry to locate corresponding vsi/ag nodes */
+/* VSI type list entry to locate corresponding VSI/ag nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
- u16 vsi_id;
};
/* driver defines the policy */
@@ -257,15 +264,70 @@ struct ice_sched_tx_policy {
u8 rdma_ena;
};
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct ice_dcb_ets_cfg {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prio_table[ICE_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS];
+ u8 tsatable[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct ice_dcb_pfc_cfg {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcena;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct ice_dcb_app_priority_table {
+ u16 prot_id;
+ u8 priority;
+ u8 selector;
+};
+
+#define ICE_MAX_USER_PRIORITY 8
+#define ICE_DCBX_MAX_APPS 32
+#define ICE_LLDPDU_SIZE 1500
+#define ICE_TLV_STATUS_OPER 0x1
+#define ICE_TLV_STATUS_SYNC 0x2
+#define ICE_TLV_STATUS_ERR 0x4
+#define ICE_APP_PROT_ID_FCOE 0x8906
+#define ICE_APP_PROT_ID_ISCSI 0x0cbc
+#define ICE_APP_PROT_ID_FIP 0x8914
+#define ICE_APP_SEL_ETHTYPE 0x1
+#define ICE_APP_SEL_TCPIP 0x2
+#define ICE_CEE_APP_SEL_ETHTYPE 0x0
+#define ICE_CEE_APP_SEL_TCPIP 0x1
+
+struct ice_dcbx_cfg {
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct ice_dcb_ets_cfg etscfg;
+ struct ice_dcb_ets_cfg etsrec;
+ struct ice_dcb_pfc_cfg pfc;
+ struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ u8 dcbx_mode;
+#define ICE_DCBX_MODE_CEE 0x1
+#define ICE_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define ICE_DCBX_APPS_NON_WILLING 0x1
+};
+
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
- struct ice_hw *hw; /* back pointer to hw instance */
+ struct ice_hw *hw; /* back pointer to HW instance */
u32 last_node_teid; /* scheduler last node info */
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
+ u8 lport;
+#define ICE_LPORT_MASK 0xff
u16 dflt_tx_vsi_rule_id;
u16 dflt_tx_vsi_num;
u16 dflt_rx_vsi_rule_id;
@@ -274,9 +336,14 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- u8 lport;
-#define ICE_LPORT_MASK 0xff
- u8 is_vf;
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ /* DCBX info */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ /* LLDP/DCBX Status */
+ u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp:1;
+ u8 is_vf:1;
};
struct ice_switch_info {
@@ -320,7 +387,7 @@ struct ice_hw {
u8 pf_id; /* device profile info */
- /* TX Scheduler values */
+ /* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
u8 flattened_layers;
@@ -331,7 +398,7 @@ struct ice_hw {
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
- u8 reset_ongoing; /* true if hw is in reset, false otherwise */
+ u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -410,6 +477,11 @@ struct ice_hw_port_stats {
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 57155b4a59dc..a805cbdd69be 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,6 +5,37 @@
#include "ice_lib.h"
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -14,7 +45,7 @@
*/
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf = pf->vf;
@@ -104,7 +135,8 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
ICE_AQ_LINK_UP);
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
}
@@ -343,11 +375,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
}
/**
- * ice_vsi_set_pvid - Set port VLAN id for the VSI
- * @vsi: the VSI being changed
- * @vid: the VLAN id to set as a PVID
+ * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
+ * @ctxt: the VSI ctxt to fill
+ * @vid: the VLAN ID to set as a PVID
+ */
+static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
+{
+ ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR);
+ ctxt->info.pvid = cpu_to_le16(vid);
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
+ * @ctxt: the VSI ctxt to fill
+ */
+static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
+{
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
+ * @vsi: the VSI to update
+ * @vid: the VLAN ID to set as a PVID
+ * @enable: true to enable the PVID, false to disable it
*/
-static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
@@ -359,50 +421,31 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
if (!ctxt)
return -ENOMEM;
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info = vsi->info;
+ if (enable)
+ ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
+ else
+ ice_vsi_kill_pvid_fill_ctxt(ctxt);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info.pvid = ctxt->info.pvid;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info = ctxt->info;
out:
devm_kfree(dev, ctxt);
return ret;
}
/**
- * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
- * @vsi: the VSI being changed
- */
-static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
-
- if (ice_vsi_manage_vlan_stripping(vsi, false)) {
- dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
- vsi->vsi_num);
- return -ENODEV;
- }
-
- vsi->info.pvid = 0;
- return 0;
-}
-
-/**
* ice_vf_vsi_setup - Set up a VF VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
- * @vf_id: defines VF id to which this VSI connects.
+ * @vf_id: defines VF ID to which this VSI connects.
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
@@ -446,8 +489,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vsi->hw_base_vector += 1;
/* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_id)
- ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+ if (vf->port_vlan_id) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
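+		/* also add a VLAN filter for the port VLAN ID so pruning
+		 * does not drop traffic tagged with the PVID
+		 */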
+ ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ }
eth_broadcast_addr(broadcast);
@@ -468,7 +513,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We dont want to reconfigure interrupts since AVF driver doesn't
+ * We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -484,6 +529,8 @@ ice_alloc_vsi_res_exit:
*/
static int ice_alloc_vf_res(struct ice_vf *vf)
{
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
int status;
/* setup VF VSI and necessary resources */
@@ -491,6 +538,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
if (status)
goto ice_alloc_vf_res_exit;
+ /* Update number of VF queues, in case VF had requested for queue
+ * changes
+ */
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
+ vf->num_req_qs != vf->num_vf_qs)
+ vf->num_vf_qs = vf->num_req_qs;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -548,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
+ /* Map mailbox interrupt. We put an explicit 0 here to remind us that
+ * VF admin queue interrupts will go to VF MSI-X vector 0.
+ */
+ wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -750,6 +810,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
}
/**
+ * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @rm_promisc: true to clear the given promiscuous mode(s), false to set them
+ *
+ * Configure unicast, multicast and VLAN promiscuous mode on the VF VSI,
+ * based on what the VF requested
+ */
+static enum ice_status
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
+ bool rm_promisc)
+{
+ struct ice_pf *pf = vf->pf;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
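+	/* if the VF has VLAN filters, toggle promiscuous mode for each of
+	 * them; otherwise use the port VLAN ID, or VLAN 0 when none is set
+	 */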
+ if (vf->num_vlan) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ rm_promisc);
+ } else if (vf->port_vlan_id) {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ } else {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ return status;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -764,6 +865,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
int v, i;
/* If we don't have any VFs, then there is nothing to reset */
@@ -778,12 +880,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
- * queues to inform Firmware about VF reset.
- */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
- ICE_VF_RESET, v, NULL);
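+	/* stop Tx/Rx rings on every VF VSI that still has queues enabled so
+	 * firmware learns about the reset before VPGEN_VFRSTAT is polled
+	 */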
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vsi *vsi;
+
+ vf = &pf->vf[v];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ }
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -796,9 +903,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
- struct ice_vf *vf = &pf->vf[v];
u32 reg;
+ vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & VPGEN_VFRSTAT_VFRD_M))
break;
@@ -818,8 +925,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_free_vf_res(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ ice_free_vf_res(vf);
+
+ /* Free VF queues as well, and reallocate later.
+		 * If a given VF has a different number of queues configured,
+		 * the request for an update will come via mailbox
+		 * communication.
+ */
+ vf->num_vf_qs = 0;
+ }
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
@@ -828,8 +945,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_cleanup_and_realloc_vf(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
@@ -847,9 +971,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
- struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
bool rsd = false;
+ u8 promisc_m;
u32 reg;
int i;
@@ -871,10 +996,11 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
/* Call Disable LAN Tx queue AQ call even when queues are not
 		 * enabled. This is needed for successful completion of VFR
*/
- ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
- vf->vf_id, NULL);
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_id, NULL);
}
+ hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
*/
@@ -900,6 +1026,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
usleep_range(10000, 20000);
+	/* disable any promiscuous modes that were enabled; ignore errors
+	 * if disabling them fails
+ */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (vf->port_vlan_id || vf->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
+ }
+
/* free VF resources to begin resetting the VSI state */
ice_free_vf_res(vf);
@@ -938,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
@@ -961,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
- (u8 *)&pfe, sizeof(pfe), NULL);
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
}
/**
@@ -1012,7 +1154,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, false))
+ if (!ice_reset_all_vfs(pf, true))
goto err_unroll_sriov;
goto err_unroll_intr;
@@ -1131,21 +1273,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
int vf_id;
u32 reg;
- if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
!pf->num_alloc_vfs)
return;
- /* Re-enable the VFLR interrupt cause here, before looking for which
- * VF got reset. Otherwise, if another VF gets a reset while the
- * first one is being processed, that interrupt will be lost, and
- * that VF will be stuck in reset forever.
- */
- reg = rd32(hw, PFINT_OICR_ENA);
- reg |= PFINT_OICR_VFLR_M;
- wr32(hw, PFINT_OICR_ENA, reg);
- ice_flush(hw);
-
- clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
@@ -1182,8 +1313,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf)
*
* send msg to VF
*/
-static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+static int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
struct ice_pf *pf;
@@ -1243,8 +1375,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
if (VF_IS_V10(&vf->vf_ver))
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
- (u8 *)&info,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
sizeof(struct virtchnl_version_info));
}
@@ -1257,15 +1389,15 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_resource *vfres = NULL;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int len = 0;
int ret;
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
@@ -1273,7 +1405,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
if (!vfres) {
- aq_ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
len = 0;
goto err;
}
@@ -1286,6 +1418,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!vsi->info.pvid)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
@@ -1336,7 +1473,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
err:
/* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
(u8 *)vfres, len);
devm_kfree(&pf->pdev->dev, vfres);
@@ -1360,15 +1497,15 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/**
* ice_find_vsi_from_id
* @pf: the pf structure to search for the VSI
- * @id: id of the VSI it is searching for
+ * @id: ID of the VSI it is searching for
*
- * searches for the VSI with the given id
+ * searches for the VSI with the given ID
*/
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
return pf->vsi[i];
@@ -1378,9 +1515,9 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
/**
* ice_vc_isvalid_vsi_id
* @vf: pointer to the VF info
- * @vsi_id: VF relative VSI id
+ * @vsi_id: VF relative VSI ID
*
- * check for the valid VSI id
+ * check for the valid VSI ID
*/
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
@@ -1395,10 +1532,10 @@ static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
/**
* ice_vc_isvalid_q_id
* @vf: pointer to the VF info
- * @vsi_id: VSI id
- * @qid: VSI relative queue id
+ * @vsi_id: VSI ID
+ * @qid: VSI relative queue ID
*
- * check for the valid queue id
+ * check for the valid queue ID
*/
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
@@ -1416,42 +1553,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
*/
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, vrk->key, NULL, 0);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, vrk->key, NULL, 0))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
NULL, 0);
}
@@ -1465,40 +1602,40 @@ error_param:
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
NULL, 0);
}
@@ -1511,25 +1648,26 @@ error_param:
*/
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1540,7 +1678,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
(u8 *)&stats, sizeof(stats));
}
@@ -1553,29 +1691,30 @@ error_param:
*/
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1584,15 +1723,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
if (ice_vsi_start_rx_rings(vsi))
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Set flag to indicate that queues are enabled */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
set_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1606,30 +1745,31 @@ error_param:
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1637,23 +1777,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
if (ice_vsi_stop_rx_rings(vsi)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop rx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
/* Clear enabled queues flag */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1666,22 +1806,30 @@ error_param:
*/
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
- enum ice_status aq_ret = 0;
unsigned long qmap;
+ u16 num_q_vectors;
int i;
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
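+	/* MSI-X vector 0 is reserved for the VF mailbox, so only the
+	 * remaining vectors are mapped to Tx/Rx queues here
+	 */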
+ num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !vsi || vsi->num_q_vectors < num_q_vectors ||
+ irqmap_info->num_vectors == 0) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- for (i = 0; i < irqmap_info->num_vectors; i++) {
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
map = &irqmap_info->vecmap[i];
vector_id = map->vector_id;
@@ -1689,40 +1837,30 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
.num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
- if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* lookout for the invalid queue index */
qmap = map->rxq_map;
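+		/* reset the ring count before recounting from the new map */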
+ q_vector->num_ring_rx = 0;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- struct ice_q_vector *q_vector;
-
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- q_vector = vsi->q_vectors[i];
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
}
qmap = map->txq_map;
+ q_vector->num_ring_tx = 0;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- struct ice_q_vector *q_vector;
-
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- q_vector = vsi->q_vectors[i];
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
@@ -1733,7 +1871,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
ice_vsi_cfg_msix(vsi);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
NULL, 0);
}
@@ -1746,26 +1884,34 @@ error_param:
*/
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
-		aq_ret = ICE_ERR_PARAM;
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+		goto error_param;
+ }
+
+ if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, qci->num_queue_pairs);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1775,7 +1921,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* copy Tx queue info from VF into VSI */
@@ -1785,13 +1931,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->rx_buf_len = qpi->rxq.databuffer_size;
if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
qpi->rxq.max_pkt_size < 64) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->max_frame = qpi->rxq.max_pkt_size;
@@ -1802,15 +1948,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
*/
vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs;
+ /* All queues of VF VSI are in TC 0 */
+ vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
+ vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
- if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
- aq_ret = 0;
- else
- aq_ret = ICE_ERR_PARAM;
+ if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
NULL, 0);
}
@@ -1845,18 +1992,18 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @set: true if mac filters are being set, false otherwise
+ * @set: true if MAC filters are being set, false otherwise
*
* add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status ret;
LIST_HEAD(mac_list);
struct ice_vsi *vsi;
int mac_count = 0;
@@ -1869,19 +2016,27 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(&pf->pdev->dev,
- "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
- ret = ICE_ERR_PARAM;
+ "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ vf->vf_id);
+ /* There is no need to let VF know about not being trusted
+ * to add more MAC addr, so we can just return success message.
+ */
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
for (i = 0; i < al->num_elements; i++) {
u8 *maddr = al->list[i].addr;
@@ -1893,40 +2048,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
* already added. Just continue.
*/
dev_info(&pf->pdev->dev,
- "mac %pM already set for VF %d\n",
+ "MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
- /* VF can't remove dflt_lan_addr/bcast mac */
+ /* VF can't remove dflt_lan_addr/bcast MAC */
dev_err(&pf->pdev->dev,
- "can't remove mac %pM for VF %d\n",
+ "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
- goto handle_mac_exit;
+ continue;
}
}
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
dev_err(&pf->pdev->dev,
- "invalid mac %pM provided for VF %d\n",
+ "invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
dev_err(&pf->pdev->dev,
- "can't change unicast mac for untrusted VF %d\n",
+ "can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
- /* get here if maddr is multicast or if VF can change mac */
+ /* get here if maddr is multicast or if VF can change MAC */
if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
goto handle_mac_exit;
}
mac_count++;
@@ -1934,14 +2088,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* program the updated filter list */
if (set)
- ret = ice_add_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
else
- ret = ice_remove_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
- if (ret) {
+ if (v_ret) {
dev_err(&pf->pdev->dev,
- "can't update mac filters for VF %d, error %d\n",
- vf->vf_id, ret);
+ "can't update MAC filters for VF %d, error %d\n",
+ vf->vf_id, v_ret);
} else {
if (set)
vf->num_mac += mac_count;
@@ -1952,7 +2106,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
handle_mac_exit:
ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
/**
@@ -1987,39 +2141,42 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to vf.
+ * available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
int req_queues = vfres->num_queue_pairs;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- cur_queues = pf->num_vf_qps;
+ cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
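+	/* a VF may grow up to its current allocation plus whatever Tx/Rx
+	 * queues are still unassigned on the PF
+	 */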
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
- } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
@@ -2033,18 +2190,18 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- aq_ret, (u8 *)vfres, sizeof(*vfres));
+ v_ret, (u8 *)vfres, sizeof(*vfres));
}
/**
* ice_set_vf_port_vlan
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @vlan_id: VLAN id being set
+ * @vlan_id: VLAN ID being set
* @qos: priority setting
* @vlan_proto: VLAN protocol
*
- * program VF Port VLAN id and/or qos
+ * program VF Port VLAN ID and/or QoS
*/
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -2087,17 +2244,18 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return ret;
}
- /* If pvid, then remove all filters on the old VLAN */
+ /* If PVID, then remove all filters on the old VLAN */
if (vsi->info.pvid)
ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
VLAN_VID_MASK));
if (vlan_id || qos) {
- ret = ice_vsi_set_pvid(vsi, vlanprio);
+ ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
goto error_set_pvid;
} else {
- ice_vsi_kill_pvid(vsi);
+ ice_vsi_manage_pvid(vsi, 0, false);
+ vsi->info.pvid = 0;
}
if (vlan_id) {
@@ -2125,52 +2283,60 @@ error_set_pvid:
* @msg: pointer to the msg buffer
* @add_v: Add VLAN if true, otherwise delete VLAN
*
- * Process virtchnl op to add or remove programmed guest VLAN id
+ * Process virtchnl op to add or remove programmed guest VLAN ID
*/
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ bool vlan_promisc = false;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
+ int status = 0;
+ u8 promisc_m;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(&pf->pdev->dev,
- "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
- aq_ret = ICE_ERR_PARAM;
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+		/* There is no need to let the VF know about not being
+		 * trusted, so we can just return a success message here
+ */
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(&pf->pdev->dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
}
}
- vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vsi->info.pvid) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2178,38 +2344,94 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
dev_err(&pf->pdev->dev,
"%sable VLAN stripping failed for VSI %i\n",
add_v ? "en" : "dis", vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
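+	/* VLANs added while the VF is in promiscuous mode must be added to
+	 * the promiscuous filters as well, instead of enabling pruning
+	 */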
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ vlan_promisc = true;
+
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
- if (!ice_vsi_add_vlan(vsi, vid)) {
- vf->num_vlan++;
+ if (!ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+			/* There is no need to let the VF know about not
+			 * being trusted, so we can just return a success
+			 * message here as well.
+ */
+ goto error_param;
+ }
+
+ if (ice_vsi_add_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid))
- if (ice_cfg_vlan_pruning(vsi, true))
- aq_ret = ICE_ERR_PARAM;
+ vf->num_vlan++;
+ /* Enable VLAN pruning when VLAN is added */
+ if (!vlan_promisc) {
+ status = ice_cfg_vlan_pruning(vsi, true, false);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
} else {
- aq_ret = ICE_ERR_PARAM;
+ /* Enable Ucast/Mcast VLAN promiscuous mode */
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ status = ice_set_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ vid, status);
+ }
}
}
} else {
- for (i = 0; i < vfl->num_elements; i++) {
+		/* A non-trusted VF may pass more VLAN elements for removal
+		 * than the number of VLAN filters actually programmed for it.
+		 * Use the number of VLANs added earlier with the add VLAN
+		 * opcode, so we do not try to remove a VLAN that does not
+		 * exist and send an erroneous failure message back to the VF.
+ */
+ int num_vf_vlan;
+
+ num_vf_vlan = vf->num_vlan;
+ for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (!ice_vsi_kill_vlan(vsi, vid)) {
- vf->num_vlan--;
+ if (ice_vsi_kill_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vf->num_vlan--;
+ /* Disable VLAN pruning when removing VLAN */
+ ice_cfg_vlan_pruning(vsi, false, false);
- /* Disable VLAN pruning when removing VLAN 0 */
- if (unlikely(!vid))
- ice_cfg_vlan_pruning(vsi, false);
+ /* Disable Unicast/Multicast VLAN promiscuous mode */
+ if (vlan_promisc) {
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ ice_clear_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
}
}
}
@@ -2217,10 +2439,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
error_param:
/* send the response to the VF */
if (add_v)
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
NULL, 0);
else
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
NULL, 0);
}
@@ -2229,7 +2451,7 @@ error_param:
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Add and program guest VLAN id
+ * Add and program guest VLAN ID
*/
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@@ -2241,7 +2463,7 @@ static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * remove programmed guest VLAN id
+ * remove programmed guest VLAN ID
*/
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@@ -2256,22 +2478,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_manage_vlan_stripping(vsi, true))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2282,22 +2504,27 @@ error_param:
*/
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (ice_vsi_manage_vlan_stripping(vsi, false))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2333,7 +2560,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
- if (err == VIRTCHNL_ERR_PARAM)
+ if (err == VIRTCHNL_STATUS_ERR_PARAM)
err = -EPERM;
else
err = -EINVAL;
@@ -2355,7 +2582,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
error_handler:
if (err) {
- ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+ NULL, 0);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
return;
@@ -2418,7 +2646,8 @@ error_handler:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, vf_id);
- err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ err = ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
NULL, 0);
break;
}
@@ -2427,7 +2656,7 @@ error_handler:
* as it is busy with pending work.
*/
dev_info(&pf->pdev->dev,
- "PF failed to honor VF %d, opcode %d\n, error %d\n",
+ "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
}
@@ -2440,8 +2669,8 @@ error_handler:
*
* return VF configuration
*/
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi)
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2550,9 +2779,9 @@ out:
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @mac: mac address
+ * @mac: MAC address
*
- * program VF mac address
+ * program VF MAC address
*/
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
@@ -2579,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
}
- /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
* flow will use the updated dflt_lan_addr and add a MAC filter
* using ice_add_mac. Also set pf_set_mac to indicate that the PF has
* set the MAC address for this VF.
@@ -2587,7 +2816,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
netdev_info(netdev,
- "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_dis_vf(vf);
@@ -2690,7 +2919,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
/* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 01470a8ee03a..3725aea16840 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -48,10 +48,10 @@ enum ice_virtchnl_cap {
struct ice_vf {
struct ice_pf *pf;
- s16 vf_id; /* VF id in the PF space */
+ s16 vf_id; /* VF ID in the PF space */
u32 driver_caps; /* reported by VF driver */
int first_vector_idx; /* first vector index of this VF */
- struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
@@ -59,10 +59,10 @@ struct ice_vf {
u8 trusted;
u16 lan_vsi_idx; /* index into PF struct */
u16 lan_vsi_num; /* ID as used by firmware */
- u64 num_mdd_events; /* number of mdd events detected */
+ u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
- unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_caps; /* VF's adv. capabilities */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
u8 link_forced;
@@ -70,6 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
+ u16 num_vf_qs; /* num of queue configured per VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
@@ -77,8 +78,8 @@ struct ice_vf {
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi);
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
@@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
- u16 vlan_id, u8 qos, __be16 vlan_proto);
-
-int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
@@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */