Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
 drivers/net/ethernet/intel/ice/ice_main.c | 375 ++++++++++++-----------
 1 file changed, 194 insertions(+), 181 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 6d256dbcb7..61eef3259c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -613,7 +613,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_prepare_for_reset(pf);
+ ice_ptp_prepare_for_reset(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
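Note: ice_ptp_prepare_for_reset() now receives the reset type, paired with the
ice_ptp_rebuild() call in the last hunk of this diff, so the PTP code can scale
its teardown and rebuild to the severity of the reset. A rough sketch of the
shape this enables (the enum values exist in the driver; both branch bodies are
illustrative assumptions, not the actual ice_ptp.c implementation):

	void ice_ptp_prepare_for_reset(struct ice_pf *pf,
				       enum ice_reset_req reset_type)
	{
		if (reset_type == ICE_RESET_PFR) {
			/* PF-only reset: quiesce this PF's Tx timestamp
			 * work, keep shared clock state (assumption)
			 */
		} else {
			/* CORER/GLOBR: full teardown, including any
			 * owned PHC state (assumption)
			 */
		}
	}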
@@ -803,6 +803,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -1649,8 +1652,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- /* Nothing to do here if sideband queue is not supported */
- if (!ice_is_sbq_supported(hw)) {
+ /* if mac_type is not generic, sideband is not supported
+ * and there's nothing to do here
+ */
+ if (!ice_is_generic_mac(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
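Note: ice_is_sbq_supported() is replaced by ice_is_generic_mac(), which is
defined elsewhere in this series. A plausible shape for the helper, assuming
the driver's mac_type enum (a sketch, not guaranteed to match the exact
implementation):

	bool ice_is_generic_mac(struct ice_hw *hw)
	{
		return (hw->mac_type == ICE_MAC_GENERIC ||
			hw->mac_type == ICE_MAC_GENERIC_3K_E825);
	}

The sideband queue only exists on these generic MAC types, so any other MAC
clears the pending bit and returns early.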
@@ -2668,17 +2673,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
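Note: ice_map_xdp_rings() above spreads num_xdp_txq rings across the VSI's
q_vectors using DIV_ROUND_UP, so the earlier vectors absorb the remainder and
the assignment stays contiguous. A standalone userspace sketch of just that
arithmetic, with hypothetical numbers (10 rings over 4 vectors yields splits
of 3/3/2/2):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int num_rings = 10, num_vectors = 4;
		int rings_rem = num_rings;

		for (int v = 0; v < num_vectors; v++) {
			/* same split the driver loop computes */
			int per_v = DIV_ROUND_UP(rings_rem, num_vectors - v);
			int base = num_rings - rings_rem;

			printf("vector %d: rings [%d..%d)\n",
			       v, base, base + per_v);
			rings_rem -= per_v;
		}
		return 0;
	}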
@@ -2691,8 +2751,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2711,49 +2770,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
- if (ice_is_reset_in_progress(pf->state))
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
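Note: the new cfg_type parameter replaces the implicit
ice_is_reset_in_progress()/q_vectors[0] checks with an explicit contract from
the caller. The enum is introduced in ice_lib.h by this series; its expected
shape (comments paraphrased):

	enum ice_xdp_cfg {
		ICE_XDP_CFG_FULL,	/* apply whole new config, .ndo_bpf path */
		ICE_XDP_CFG_PART,	/* partial setup/teardown around VSI rebuild */
	};

With ICE_XDP_CFG_PART the Tx scheduler update and the ring-to-vector mapping
are skipped here and redone at the end of ice_vsi_rebuild(), which is why
ice_map_xdp_rings() was split out as a non-static helper.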
@@ -2803,22 +2828,21 @@ err_map_xdp:
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset bits
- * in pf->state won't be set, so additionally check first q_vector
- * against NULL
+ * rings
*/
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
ice_for_each_q_vector(vsi, v_idx) {
@@ -2859,7 +2883,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -2970,7 +2994,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
} else {
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
@@ -2981,7 +3006,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
- xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
/* reallocate Rx queues that were used for zero-copy */
@@ -4572,90 +4597,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-static int ice_start_eth(struct ice_vsi *vsi)
-{
- int err;
-
- err = ice_init_mac_fltr(vsi->back);
- if (err)
- return err;
-
- err = ice_vsi_open(vsi);
- if (err)
- ice_fltr_remove_all(vsi);
-
- return err;
-}
-
-static void ice_stop_eth(struct ice_vsi *vsi)
-{
- ice_fltr_remove_all(vsi);
- ice_vsi_close(vsi);
-}
-
-static int ice_init_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- int err;
-
- if (!vsi)
- return -EINVAL;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- err = ice_cfg_netdev(vsi);
- if (err)
- return err;
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- err = ice_init_mac_fltr(pf);
- if (err)
- goto err_init_mac_fltr;
-
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create_pf_port;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
- err = ice_register_netdev(vsi);
- if (err)
- goto err_register_netdev;
-
- err = ice_tc_indir_block_register(vsi);
- if (err)
- goto err_tc_indir_block_register;
-
- ice_napi_add(vsi);
-
- return 0;
-
-err_tc_indir_block_register:
- ice_unregister_netdev(vsi);
-err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create_pf_port:
-err_init_mac_fltr:
- ice_decfg_netdev(vsi);
- return err;
-}
-
-static void ice_deinit_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
- if (!vsi)
- return;
-
- ice_vsi_close(vsi);
- ice_unregister_netdev(vsi);
- ice_devlink_destroy_pf_port(pf);
- ice_tc_indir_block_unregister(vsi);
- ice_decfg_netdev(vsi);
-}
-
/**
* ice_wait_for_fw - wait for full FW readiness
* @hw: pointer to the hardware structure
@@ -4681,7 +4622,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
-static int ice_init_dev(struct ice_pf *pf)
+int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -4774,7 +4715,7 @@ err_init_pf:
return err;
}
-static void ice_deinit_dev(struct ice_pf *pf)
+void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
ice_deinit_pf(pf);
@@ -5079,31 +5020,47 @@ static void ice_deinit(struct ice_pf *pf)
/**
* ice_load - load pf by init hw and starting VSI
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
int ice_load(struct ice_pf *pf)
{
- struct ice_vsi_cfg_params params = {};
struct ice_vsi *vsi;
int err;
- err = ice_init_dev(pf);
+ devl_assert_locked(priv_to_devlink(pf));
+
+ vsi = ice_get_main_vsi(pf);
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
return err;
- vsi = ice_get_main_vsi(pf);
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
+
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
- rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_register_netdev(vsi);
if (err)
- goto err_vsi_cfg;
+ goto err_register_netdev;
- err = ice_start_eth(ice_get_main_vsi(pf));
+ err = ice_tc_indir_block_register(vsi);
if (err)
- goto err_start_eth;
- rtnl_unlock();
+ goto err_tc_indir_block_register;
+
+ ice_napi_add(vsi);
err = ice_init_rdma(pf);
if (err)
@@ -5117,29 +5074,35 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
- ice_vsi_close(ice_get_main_vsi(pf));
- rtnl_lock();
-err_start_eth:
- ice_vsi_decfg(ice_get_main_vsi(pf));
-err_vsi_cfg:
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
return err;
}
/**
* ice_unload - unload pf by stopping VSI and deinit hw
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
void ice_unload(struct ice_pf *pf)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ devl_assert_locked(priv_to_devlink(pf));
+
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- rtnl_lock();
- ice_stop_eth(ice_get_main_vsi(pf));
- ice_vsi_decfg(ice_get_main_vsi(pf));
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_decfg_netdev(vsi);
}
/**
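Note: ice_load()/ice_unload() now assert the devlink instance lock instead of
taking rtnl_lock themselves. Devlink reload entry points already hold that
lock; any other caller has to wrap the calls, as ice_probe()/ice_remove()
below do:

	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	err = ice_load(pf);
	devl_unlock(devlink);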
@@ -5237,27 +5200,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
goto err_init;
- err = ice_init_eth(pf);
- if (err)
- goto err_init_eth;
-
- err = ice_init_rdma(pf);
+ devl_lock(priv_to_devlink(pf));
+ err = ice_load(pf);
+ devl_unlock(priv_to_devlink(pf));
if (err)
- goto err_init_rdma;
+ goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
- ice_init_features(pf);
-
return 0;
err_init_devlink:
- ice_deinit_rdma(pf);
-err_init_rdma:
- ice_deinit_eth(pf);
-err_init_eth:
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+err_load:
ice_deinit(pf);
err_init:
pci_disable_device(pdev);
@@ -5340,8 +5299,6 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
- ice_debugfs_exit();
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5355,12 +5312,14 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_deinit_features(pf);
+
ice_deinit_devlink(pf);
- ice_deinit_rdma(pf);
- ice_deinit_eth(pf);
- ice_deinit(pf);
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+
+ ice_deinit(pf);
ice_vsi_release_all(pf);
ice_setup_mc_magic_wake(pf);
@@ -5495,7 +5454,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5575,6 +5534,11 @@ static int __maybe_unused ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
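Note: suspend now fully deinitializes the RDMA auxiliary device instead of
merely unplugging it, releasing its interrupt resources before the interrupt
scheme is torn down; resume therefore recreates it with ice_init_rdma() only
after the interrupt scheme has been restored above. The ordering, not the
error handling, is the point of this hunk.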
@@ -5753,6 +5717,10 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
@@ -5842,6 +5810,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ ice_debugfs_exit();
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
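Note: ice_debugfs_exit() moves from per-device ice_remove() (removed in an
earlier hunk) to module exit, matching the module-wide ice_debugfs_init() in
ice_module_init(): the debugfs root is shared by all PFs, so removing one
device must not delete it while other PFs still use it.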
@@ -7059,6 +7028,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each Rx queue; Tx queues are
+ * handled in ice_vsi_stop_tx_ring()
+ */
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
+
+ ice_flush(hw);
+
+ /* don't call synchronize_irq() for VF's from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*
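Note: ice_vsi_dis_irq() masks the per-Rx-queue cause-enable bit in
QINT_RQCTL (Tx queues are handled in ice_vsi_stop_tx_ring()), zeroes each
vector's GLINT_DYN_CTL, flushes the writes, and then waits out in-flight
handlers with synchronize_irq(). VF VSIs skip the synchronize_irq() step,
presumably because the host never requested those IRQs and so has no virq to
wait on.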
@@ -7547,7 +7560,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_reset(pf);
+ ice_ptp_rebuild(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);