author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-06-03 05:08:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-06-03 05:08:49 +0000
commit    76047c0312414915035f4ebbaa533fe3817a21e0 (patch)
tree      86f79ba3c1f753b1d39be793455035794fade031 /drivers/net/ethernet
parent    Adding upstream version 6.8.11. (diff)
Adding upstream version 6.8.12.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 134
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 129
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 22
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 240
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 19
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 6
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 9
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 9
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_est.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 105
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 14
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c | 14
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 76
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.h | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 23
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/Makefile | 1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c | 269
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h | 7
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 172
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 90
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h | 3
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 18
52 files changed, 1067 insertions(+), 620 deletions(-)
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 705c3eb19..d1fbadbf8 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1107,10 +1107,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1119,6 +1122,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@ -1415,15 +1420,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packages until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1726,10 +1735,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
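
The gemini hunks above serialize every read-modify-write of the shared GLOBAL_INTERRUPT_ENABLE_0_REG, and the status acknowledgment that races with it, behind geth->irq_lock; without the lock, two queues updating the same enable register can lose each other's bits. A minimal sketch of the pattern being applied (the helper name is hypothetical; the register flow is the one shown above):

/* Guard a shared-register read-modify-write. irqsave is needed because
 * the same lock is taken with plain spin_lock() from the hard-IRQ path
 * (gmac_irq) above.
 */
static void gemini_set_irq_enable(struct gemini_ethernet *geth, u32 mask, bool en)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&geth->irq_lock, flags);
	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	val = en ? (val | mask) : (val & ~mask);
	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	spin_unlock_irqrestore(&geth->irq_lock, flags);
}
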
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index bfdbdab44..5e6e1f719 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8decb1b07..e92a83033 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3678,29 +3678,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fec_poll_controller - FEC Poll controller function
- * @dev: The FEC network adapter
- *
- * Polled functionality used by netconsole and others in non interrupt mode
- *
- */
-static void fec_poll_controller(struct net_device *dev)
-{
- int i;
- struct fec_enet_private *fep = netdev_priv(dev);
-
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- if (fep->irq[i] > 0) {
- disable_irq(fep->irq[i]);
- fec_enet_interrupt(fep->irq[i], dev);
- enable_irq(fep->irq[i]);
- }
- }
-}
-#endif
-
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -4007,9 +3984,6 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_eth_ioctl = phy_do_ioctl_running,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fec_poll_controller,
-#endif
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c979192e4..a545a7917 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -536,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -631,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -826,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
*/
-int
+static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
@@ -897,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
return 0;
}
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf->num_txqs = 1;
+
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ int err = 0;
+ u16 q_idx;
+
+ qg_buf->num_txqs = 1;
+
+ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+ int ret;
+ int i;
+
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
+
+ return 0;
+}
+
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 17321ba75..b711bc921 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,8 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
- struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 10c32cd80..ce50a322d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4700,7 +4700,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
@@ -4922,7 +4922,7 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
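
The DEFINE_FLEX to DEFINE_RAW_FLEX conversions here, and in the ice_ddp.c, ice_lag.c, ice_sched.c and ice_switch.c hunks below, track the upstream <linux/overflow.h> split: DEFINE_FLEX now also takes and initializes the flex array's __counted_by counter member, while DEFINE_RAW_FLEX only reserves and zero-initializes on-stack storage. A simplified sketch of the helpers, assuming these AQ structures do not annotate their flex member with __counted_by (see the header for the exact definitions, which add a compile-time constant-count check):

/* Simplified: carve zeroed on-stack storage for a struct with a
 * trailing flex array of `count` elements, plus a typed pointer to it.
 */
#define __DEFINE_FLEX(type, name, member, count, initializer...)	\
	union {								\
		u8 bytes[struct_size_t(type, member, count)];		\
		type obj;						\
	} name##_u initializer;						\
	type *name = (type *)&name##_u

#define DEFINE_RAW_FLEX(type, name, member, count)			\
	__DEFINE_FLEX(type, name, member, count, = {})

DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) additionally sets .obj.COUNTER = COUNT, which is only meaningful when MEMBER carries a __counted_by(COUNTER) annotation.
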
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 8b7504a9d..1bf8ee98f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1424,14 +1424,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
goto exit;
}
- conf_idx = le32_to_cpu(seg->signed_seg_idx);
- start = le32_to_cpu(seg->signed_buf_start);
count = le32_to_cpu(seg->signed_buf_count);
-
state = ice_download_pkg_sig_seg(hw, seg);
- if (state)
+ if (state || !count)
goto exit;
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
count);
@@ -1934,8 +1934,8 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;
@@ -1986,8 +1986,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u32 i;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index a7a342809..f0e76f0a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -491,7 +491,7 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -849,7 +849,7 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -1873,7 +1873,7 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index cfc20684f..15bdf6ef3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1672,27 +1672,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1795,114 +1774,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
- if (q_idx >= vsi->num_rxq)
- return -EINVAL;
-
- return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
- if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
- /* set up individual rings */
- ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- int err = 0;
- u16 q_idx;
-
- qg_buf->num_txqs = 1;
-
- for (q_idx = 0; q_idx < count; q_idx++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
- int ret;
- int i;
-
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
- if (ret)
- return ret;
-
- ice_for_each_rxq(vsi, i)
- ice_tx_xsk_pool(vsi, i);
-
- return 0;
-}
-
/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index bfcfc582a..b5a1ed7cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -72,8 +64,6 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index d174a4eeb..a1525992d 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -237,7 +237,7 @@ static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u32 node_teid)
{
- DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
u16 buf_size = __struct_size(buf);
u16 num_groups_removed = 0;
int status;
@@ -2219,7 +2219,7 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ba0ef91e4..b4ea935e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,7 +1812,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
int status;
@@ -2081,7 +2081,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
int status;
@@ -4420,7 +4420,7 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4448,7 +4448,7 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4478,7 +4478,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
u16 res_type;
int status;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2eecd0f39..1857220d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -218,42 +218,28 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
int err;
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, size);
- qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
if (err)
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
- err = ice_vsi_cfg_rxq(rx_ring);
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
if (err)
return err;
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 986d429d1..6972d7284 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -376,7 +376,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_tx_count);
if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count)
+ new_rx_count == vport->rxq_desc_count &&
+ kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index caa13b9ce..d7d73295f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
- .rx_ptr = 0x6100,
- .rx_cnt_cfg = 0x6104,
- .pcrx_ptr = 0x6108,
- .glo_cfg = 0x6204,
- .rst_idx = 0x6208,
- .delay_irq = 0x620c,
- .irq_status = 0x6220,
- .irq_mask = 0x6228,
- .adma_rx_dbg0 = 0x6238,
- .int_grp = 0x6250,
+ .rx_ptr = 0x4100,
+ .rx_cnt_cfg = 0x4104,
+ .pcrx_ptr = 0x4108,
+ .glo_cfg = 0x4204,
+ .rst_idx = 0x4208,
+ .delay_irq = 0x420c,
+ .irq_status = 0x4220,
+ .irq_mask = 0x4228,
+ .adma_rx_dbg0 = 0x4238,
+ .int_grp = 0x4250,
},
.qdma = {
.qtx_cfg = 0x4400,
@@ -1107,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1139,7 +1139,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
@@ -1155,17 +1155,17 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
struct mtk_tx_dma_v2 *txd;
- txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd = eth->scratch_ring + i * soc->tx.desc_size;
txd->txd1 = addr;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ (i + 1) * soc->tx.desc_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
@@ -1416,7 +1416,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1457,7 +1457,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1470,7 +1470,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1513,7 +1513,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
} else {
int next_idx;
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1522,7 +1522,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1547,7 +1547,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1654,7 +1654,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1822,7 +1822,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
}
htxd = txd;
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
@@ -1841,7 +1841,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -1879,7 +1879,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
} else {
int idx;
- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -1890,7 +1890,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2021,14 +2021,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
@@ -2140,7 +2140,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2156,7 +2156,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxdcsum = &trxd.rxd4;
}
- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2280,7 +2280,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;
@@ -2331,7 +2331,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -2421,7 +2421,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2437,10 +2437,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
return rx_done_total;
}
@@ -2449,7 +2449,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2572,14 +2572,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2634,15 +2634,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
} else {
struct mtk_tx_ring *tx_ring = &eth->tx_ring;
ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
}
if (!ring->dma)
@@ -2653,7 +2653,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
dma_addr_t dma_addr;
void *data;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2690,7 +2690,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2744,7 +2744,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!ring->data[i])
continue;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;
@@ -2761,7 +2761,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
@@ -3124,7 +3124,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -3174,7 +3174,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}
@@ -3200,9 +3200,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3220,10 +3220,10 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif
@@ -3387,7 +3387,7 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3471,7 +3471,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -3893,7 +3893,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3947,9 +3947,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -5039,11 +5039,15 @@ static const struct mtk_soc_data mt2701_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5059,11 +5063,15 @@ static const struct mtk_soc_data mt7621_data = {
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5081,11 +5089,15 @@ static const struct mtk_soc_data mt7622_data = {
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5102,11 +5114,15 @@ static const struct mtk_soc_data mt7623_data = {
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5121,11 +5137,15 @@ static const struct mtk_soc_data mt7629_data = {
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5143,14 +5163,18 @@ static const struct mtk_soc_data mt7981_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7986_data = {
@@ -5165,14 +5189,18 @@ static const struct mtk_soc_data mt7986_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7988_data = {
@@ -5187,11 +5215,15 @@ static const struct mtk_soc_data mt7988_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5204,11 +5236,15 @@ static const struct mtk_soc_data rt5350_data = {
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
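
The net effect of the mtk_eth_soc.c hunks above is to split the single soc->txrx parameter block into independent soc->tx and soc->rx blocks. That split is what lets mt7981 and mt7986 keep v2-format Tx descriptors (dma_len_offset 8) while dropping back to v1 Rx descriptors on ADMAv1 (dma_len_offset 16, with the mt7986 PDMA register base moved from 0x6xxx to 0x4xxx). Only mt7988 stays on v2 Rx descriptors, which is why the rxd5/rxd6 checks tighten from mtk_is_netsys_v2_or_greater() to mtk_is_netsys_v3_or_greater(). The resulting access pattern, as seen in the hunks:

/* before: one shared block for both directions */
rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
/* after: per-direction parameters */
rxd = ring->dma + idx * eth->soc->rx.desc_size;
txd = ring->dma + idx * eth->soc->tx.desc_size;
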
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 9ae3b8a71..39b50de1d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -327,8 +327,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +348,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -1153,10 +1153,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1174,13 +1173,17 @@ struct mtk_soc_data {
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } rx;
};
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4957412ff..20768ef2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work)
bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
struct mlx5_core_dev *dev;
- unsigned long cb_timeout;
- struct semaphore *sem;
+ unsigned long timeout;
unsigned long flags;
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
+
dev = container_of(cmd, struct mlx5_core_dev, cmd);
- cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
- complete(&ent->handling);
- sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
- down(sem);
if (!ent->page_queue) {
+ if (down_timeout(&cmd->vars.sem, timeout)) {
+ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
+ mlx5_command_str(ent->op), ent->op);
+ if (ent->callback) {
+ ent->callback(-EBUSY, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ cmd_ent_put(ent);
+ } else {
+ ent->ret = -EBUSY;
+ complete(&ent->done);
+ }
+ complete(&ent->slotted);
+ return;
+ }
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
@@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work)
ent->ret = -EAGAIN;
complete(&ent->done);
}
- up(sem);
+ up(&cmd->vars.sem);
return;
}
} else {
+ down(&cmd->vars.pages_sem);
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
@@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
+ complete(&ent->slotted);
+
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->ts1 = ktime_get_ns();
cmd_mode = cmd->mode;
- if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
+ if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
@@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
ent->ret = -ECANCELED;
goto out_err;
}
+
+ wait_for_completion(&ent->slotted);
+
if (cmd->mode == CMD_MODE_POLLING || ent->polling)
wait_for_completion(&ent->done);
else if (!wait_for_completion_timeout(&ent->done, timeout))
@@ -1157,6 +1176,9 @@ out_err:
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
mlx5_command_str(ent->op), ent->op);
+ } else if (err == -EBUSY) {
+ mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->polling = force_polling;
init_completion(&ent->handling);
+ init_completion(&ent->slotted);
if (!callback)
init_completion(&ent->done);
@@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
- if (err == -ETIMEDOUT || err == -ECANCELED)
+ if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
goto out_free;
ds = ent->ts2 - ent->ts1;
@@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
dev = container_of(cmd, struct mlx5_core_dev, cmd);
eqe = data;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return NOTIFY_DONE;
+
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
return NOTIFY_OK;
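
The cmd.c rework above replaces an unconditional down() on the command-slot semaphore with down_timeout(), so a submitter that cannot obtain a slot within the command timeout now fails fast with -EBUSY instead of blocking the command workqueue indefinitely. down_timeout() returns 0 once the semaphore is acquired and -ETIME on timeout, which is why the bare if (down_timeout(...)) test works. A minimal usage sketch (hypothetical helper, real kernel API):

/* Bounded semaphore acquisition, as in cmd_work_handler() above. */
static int acquire_cmd_slot(struct semaphore *sem, unsigned int timeout_ms)
{
	/* down_timeout() returns 0 on success, -ETIME on timeout. */
	if (down_timeout(sem, msecs_to_jiffies(timeout_ms)))
		return -EBUSY;	/* surface as busy, as the mlx5 path does */
	return 0;
}

The new ent->slotted completion pairs with this: wait_func() now waits for the work handler to either claim a slot or give up before it starts waiting on ent->done.
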
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 82e6abbc1..c969b8e70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE,
+ * and xsk->chunk_size is limited to 65535 bytes.
+ */
+ if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 1b9bc32ef..c5ea1d1d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6d..ef5567487 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -833,7 +833,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
@@ -925,7 +925,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
static inline int
-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e3cce110e..58529d1a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2501,6 +2501,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
}
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
+ return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+
+ return 0;
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -2525,13 +2535,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- goto err_reps;
- }
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_reps;
+ }
return 0;
@@ -3276,7 +3284,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -3289,13 +3297,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
- ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+ ret = __esw_offloads_load_rep(esw, rep, REP_IB);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
- mlx5_esw_offloads_rep_load(esw, rep->vport);
+ __esw_offloads_load_rep(esw, rep, REP_IB);
}
return 0;
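
The hunks above funnel both the per-vport load loop and the IB-only reload through one cmpxchg-guarded helper, so a rep is loaded at most once. A minimal sketch of that idempotent-load pattern, with illustrative state values and a hypothetical load callback (mlx5 defines the real REP_* states in its eswitch headers):

#include <linux/atomic.h>

enum { REP_REGISTERED = 1, REP_LOADED = 2 };    /* illustrative values */

/* Only the caller that wins the REGISTERED -> LOADED transition invokes
 * the load callback; every other caller sees a no-op, which is what makes
 * the helper safe to call from both the vport loop and the IB reload path.
 */
static int load_rep_once(atomic_t *state, int (*load)(void *ctx), void *ctx)
{
        if (atomic_cmpxchg(state, REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
                return load(ctx);

        return 0;
}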
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 69d482f7c..37598d116 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -814,7 +814,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb)
for (i = 0; i < ldev->ports; i++)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -922,7 +922,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
}
@@ -933,7 +933,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 82889f305..571ea26ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -99,7 +99,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
@@ -113,7 +113,7 @@ err_rescan_drivers:
err_add_devices:
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e285823bd..0288e19e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1680,6 +1680,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
+ devl_register(devlink);
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err) {
@@ -1693,27 +1695,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
goto query_hca_caps_err;
}
- devl_lock(devlink);
- devl_register(devlink);
-
err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto params_reg_err;
+ goto query_hca_caps_err;
}
devl_unlock(devlink);
return 0;
-params_reg_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
query_hca_caps_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ devl_unregister(devlink);
+ devl_unlock(devlink);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7ebe71280..b2986175d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
+ /* Peer devlink logic expects to work on an unregistered devlink instance. */
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
err = mlx5_init_one_light(mdev);
else
@@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
- err = mlx5_core_peer_devlink_set(sf_dev, devlink);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
- goto peer_devlink_set_err;
- }
-
return 0;
-peer_devlink_set_err:
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
- else
- mlx5_uninit_one(sf_dev->mdev);
init_one_err:
+peer_devlink_set_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2635ef895..61d88207e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1087,8 +1087,6 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
- lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
-
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1179,6 +1177,8 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"no ethernet-ports child found\n");
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1257,6 +1257,8 @@ cleanup_ports:
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return err;
}
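
The reorder above creates the debugfs directory only after the early, failure-prone probe steps and adds the missing teardown on the cleanup_ports path. The resulting shape, sketched with illustrative names:

#include <linux/debugfs.h>

static struct dentry *example_probe_debugfs(void)
{
        /* create only once probe can no longer fail before this point */
        return debugfs_create_dir("lan966x", NULL);
}

static void example_probe_unwind(struct dentry *root)
{
        /* debugfs_remove_recursive() is safe on NULL/error cookies */
        debugfs_remove_recursive(root);
}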
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2729a2c5a..ca814fe8a 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -848,7 +848,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (!wait_for_completion_timeout(&ctx->comp_event,
- (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
+ (msecs_to_jiffies(hwc->hwc_timeout)))) {
dev_err(hwc->dev, "HWC: Request timed out!\n");
err = -ETIMEDOUT;
goto out;
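
The fix drops a double conversion: msecs_to_jiffies() already returns jiffies, so the extra "* HZ" inflated the timeout by a factor of HZ (a 100 ms wait became 100 s on HZ=1000). A hedged sketch of the corrected wait, with a hypothetical completion:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int hwc_wait_example(struct completion *comp, u32 timeout_ms)
{
        /* msecs_to_jiffies() is the only conversion needed here */
        if (!wait_for_completion_timeout(comp, msecs_to_jiffies(timeout_ms)))
                return -ETIMEDOUT;

        return 0;
}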
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c278f8893..8159b4c31 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1206,7 +1206,6 @@ out:
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
- char name[NAME_SIZE];
int i;
if (IS_VF(cdev))
@@ -1215,11 +1214,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
- snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
- cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
+ 0, 0, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ hwfn->abs_pf_id);
- hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
if (!hwfn->slowpath_wq) {
DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
return -ENOMEM;
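
alloc_workqueue() takes a printf-style format string, which is what lets the hunk above drop the NAME_SIZE stack buffer and the separate snprintf() call. A minimal sketch:

#include <linux/workqueue.h>

/* The format arguments are consumed by alloc_workqueue() itself;
 * flags and max_active are both 0 here, as in the driver.
 */
static struct workqueue_struct *slowpath_wq_example(u8 bus, u8 slot, u8 pf)
{
        return alloc_workqueue("slowpath-%02x:%02x.%02x", 0, 0,
                               bus, slot, pf);
}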
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 32b73f398..70d0b3239 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4221,11 +4221,11 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int frags = skb_shinfo(skb)->nr_frags;
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
struct TxDesc *txd_first, *txd_last;
bool stop_queue, door_bell;
+ unsigned int frags;
u32 opts[2];
if (unlikely(!rtl_tx_slots_avail(tp))) {
@@ -4248,6 +4248,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
+ frags = skb_shinfo(skb)->nr_frags;
if (frags) {
if (rtl8169_xmit_frags(tp, skb, opts, entry))
goto err_dma_1;
@@ -4541,10 +4542,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- if (napi_schedule_prep(&tp->napi)) {
- rtl_irq_disable(tp);
- __napi_schedule(&tp->napi);
- }
+ rtl_irq_disable(tp);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
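
napi_schedule() performs the napi_schedule_prep()/__napi_schedule() pair internally, so the handler above only needs to mask device interrupts first. A sketch of the simplified shape, with hypothetical driver types and helpers:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
        struct napi_struct napi;
};

static void example_hw_irq_disable(struct example_priv *tp)
{
        /* mask device interrupts; the NAPI poll routine re-arms them */
}

static irqreturn_t example_isr(int irq, void *dev_instance)
{
        struct example_priv *tp = dev_instance;

        example_hw_irq_disable(tp);
        napi_schedule(&tp->napi);       /* no-op if already scheduled */

        return IRQ_HANDLED;
}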
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 46eee747c..45ef5ac07 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -156,8 +156,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
writew(*wp++, a);
}
-#define SMC_inw(a, r) _swapw(readw((a) + (r)))
-#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_inw(a, r) ioread16be((a) + (r))
+#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
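
ioread16be()/iowrite16be() perform the byte swap that the replaced _swapw(readw()) pair open-coded, and they also accept cookies from ioport_map(). A sketch of the accessor shape:

#include <linux/io.h>

static u16 smc_reg_read_example(void __iomem *base, unsigned int reg)
{
        return ioread16be(base + reg);  /* 16-bit big-endian read */
}

static void smc_reg_write_example(void __iomem *base, unsigned int reg,
                                  u16 val)
{
        iowrite16be(val, base + reg);   /* 16-bit big-endian write */
}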
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 5a1d46dcd..618d455b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -225,6 +225,8 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f155e4841..1db1359d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -260,6 +260,8 @@ struct stmmac_priv {
struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct stmmac_safety_stats sstats;
struct plat_stmmacenet_data *plat;
+ /* Protect est parameters */
+ struct mutex est_lock;
struct dma_features dma_cap;
struct stmmac_counters mmc;
int hw_cap_support;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index 4da6ccc17..c9693f77e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
void __iomem *est_addr = priv->estaddr;
u32 txqcnt_mask = BIT(txqcnt) - 1;
+ int i;
status = readl(est_addr + EST_STATUS);
@@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
+ for (i = 0; i < txqcnt; i++) {
+ if (feqn & BIT(i))
+ x->mtl_est_txq_hlbf[i]++;
+ }
+
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 83b732c30..12f4c0da8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2491,6 +2491,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ continue;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -4485,6 +4492,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
skb->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
+
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4702,6 +4716,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
dev_kfree_skb(skb);
priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
@@ -4858,6 +4873,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdpf->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ return STMMAC_XDP_CONSUMED;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
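
The same three-condition gate is repeated above for the XSK, skb, and XDP transmit paths. Factored out, it would look roughly like this (helper name hypothetical; assumes the driver structures as modified in this patch):

/* Drop and count a frame that exceeds the queue's configured
 * max SDU while EST is active.
 */
static bool stmmac_est_oversize_example(struct stmmac_priv *priv,
                                        u32 queue, u32 len)
{
        struct stmmac_est *est = priv->plat->est;

        if (est && est->enable && est->max_sdu[queue] &&
            len > est->max_sdu[queue]) {
                priv->xstats.max_sdu_txq_drop[queue]++;
                return true;
        }

        return false;
}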
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e04830a3a..0c5aab6dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -70,11 +70,11 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
/* If EST is enabled, disable it before adjusting the PTP time. */
if (priv->plat->est && priv->plat->est->enable) {
est_rst = true;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
write_lock_irqsave(&priv->ptp_lock, flags);
@@ -87,7 +87,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
ktime_t current_time_ns, basetime;
u64 cycle_time;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
time.tv_nsec = priv->plat->est->btr_reserve[0];
@@ -104,7 +104,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->plat->est->enable = true;
ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret)
netdev_err(priv->dev, "failed to configure EST\n");
}
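
Moving the mutex from plat->est into struct stmmac_priv matters because plat->est is reallocated and memset() on each taprio replace, which would wipe a lock embedded in it. With the lock outside the object it protects, even the reset itself can be serialized; an illustrative shape:

#include <linux/mutex.h>
#include <linux/string.h>

struct est_owner_example {
        struct mutex est_lock;          /* lives in the long-lived struct */
        struct est_cfg_example {
                int enable;
                /* ... gate control list, base time, ... */
        } *est;                         /* reallocated per configuration */
};

static void est_reset_example(struct est_owner_example *p)
{
        mutex_lock(&p->est_lock);       /* safe: lock is outside *est */
        memset(p->est, 0, sizeof(*p->est));
        mutex_unlock(&p->est_lock);
}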
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 26fa33e5e..620c16e9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
return time;
}
-static int tc_setup_taprio(struct stmmac_priv *priv,
- struct tc_taprio_qopt_offload *qopt)
+static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct plat_stmmacenet_data *plat = priv->plat;
+ u32 num_tc = qopt->mqprio.qopt.num_tc;
+ u32 offset, count, i, j;
+
+ /* QueueMaxSDU received by the driver corresponds to the Linux traffic
+ * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
+ */
+ for (i = 0; i < num_tc; i++) {
+ if (!qopt->max_sdu[i])
+ continue;
+
+ offset = qopt->mqprio.qopt.offset[i];
+ count = qopt->mqprio.qopt.count[i];
+
+ for (j = offset; j < offset + count; j++)
+ plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ }
+}
+
+static int tc_taprio_configure(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
@@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- else if (qopt->cmd != TAPRIO_CMD_REPLACE)
- return -EOPNOTSUPP;
if (qopt->num_entries >= dep)
return -EINVAL;
@@ -984,17 +1004,19 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (!plat->est)
return -ENOMEM;
- mutex_init(&priv->plat->est->lock);
+ mutex_init(&priv->est_lock);
} else {
+ mutex_lock(&priv->est_lock);
memset(plat->est, 0, sizeof(*plat->est));
+ mutex_unlock(&priv->est_lock);
}
size = qopt->num_entries;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -1025,7 +1047,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
@@ -1045,8 +1067,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ter = qopt->cycle_time_extension;
+ tc_taprio_map_maxsdu_txq(priv, qopt);
+
if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
return -EOPNOTSUPP;
}
@@ -1057,7 +1081,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -1074,11 +1098,16 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
disable:
if (priv->plat->est) {
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ /* Reset taprio status */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ priv->xstats.max_sdu_txq_drop[i] = 0;
+ priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ }
+ mutex_unlock(&priv->est_lock);
}
priv->plat->fpe_cfg->enable = false;
@@ -1095,6 +1124,57 @@ disable:
return ret;
}
+static void tc_taprio_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u64 window_drops = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ window_drops += priv->xstats.max_sdu_txq_drop[i] +
+ priv->xstats.mtl_est_txq_hlbf[i];
+ qopt->stats.window_drops = window_drops;
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ qopt->stats.tx_overruns = 0;
+}
+
+static void tc_taprio_queue_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
+ int queue = qopt->queue_stats.queue;
+
+ q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
+ priv->xstats.mtl_est_txq_hlbf[queue];
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ q_stats->stats.tx_overruns = 0;
+}
+
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int err = 0;
+
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ case TAPRIO_CMD_DESTROY:
+ err = tc_taprio_configure(priv, qopt);
+ break;
+ case TAPRIO_CMD_STATS:
+ tc_taprio_stats(priv, qopt);
+ break;
+ case TAPRIO_CMD_QUEUE_STATS:
+ tc_taprio_queue_stats(priv, qopt);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1126,6 +1206,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
return -EOPNOTSUPP;
caps->gate_mask_per_txq = true;
+ caps->supports_queue_max_sdu = true;
return 0;
}
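
tc_taprio_map_maxsdu_txq() above converts each queueMaxSDU, which is an MSDU size, into the frame length the DWMAC compares against by adding the MAC header minus the EtherType field, i.e. ETH_HLEN - ETH_TLEN = 14 - 2 = 12 bytes:

#include <linux/if_ether.h>
#include <linux/types.h>

/* queueMaxSDU (MSDU) -> on-wire frame length used by the hardware */
static u32 msdu_to_frame_len_example(u32 max_sdu)
{
        return max_sdu + ETH_HLEN - ETH_TLEN;
}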
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9bd1df830..d3a2fbb14 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gem_poll_controller(struct net_device *dev)
-{
- struct gem *gp = netdev_priv(dev);
-
- disable_irq(gp->pdev->irq);
- gem_interrupt(gp->pdev->irq, dev);
- enable_irq(gp->pdev->irq);
-}
-#endif
-
static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = gem_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = gem_poll_controller,
-#endif
};
static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 4a78e8a1c..3fa49448a 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -2156,7 +2156,12 @@ static int prueth_probe(struct platform_device *pdev)
prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
- emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ if (ret) {
+ dev_err(dev,
+ "can't connect to MII0 PHY, error -%d", ret);
+ goto netdev_unregister;
+ }
phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
}
@@ -2168,7 +2173,12 @@ static int prueth_probe(struct platform_device *pdev)
}
prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
- emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ if (ret) {
+ dev_err(dev,
+ "can't connect to MII1 PHY, error %d", ret);
+ goto netdev_unregister;
+ }
phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1db754615..c09a6f744 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1958,7 +1958,7 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
- wx->msix_in_use = false;
+ bitmap_zero(wx->state, WX_STATE_NBITS);
return 0;
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 08d3e4069..c87afe5de 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1614,14 +1614,12 @@ static int wx_acquire_msix_vectors(struct wx *wx)
/* One for non-queue interrupts */
nvecs += 1;
- if (!wx->msix_in_use) {
- wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entry) {
- kfree(wx->msix_q_entries);
- wx->msix_q_entries = NULL;
- return -ENOMEM;
- }
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
@@ -1931,10 +1929,8 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (pdev->msix_enabled) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
- if (!wx->msix_in_use) {
- kfree(wx->msix_entry);
- wx->msix_entry = NULL;
- }
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
@@ -2694,15 +2690,63 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
wx->rss_enabled = false;
}
- if (changed &
- (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX))
+ netdev->features = features;
+
+ if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ wx->do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
- return 1;
+ return 0;
}
EXPORT_SYMBOL(wx_set_features);
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct wx *wx = netdev_priv(netdev);
+
+ if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
+ if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_INSERTION_FEATURES) {
+ if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
+ (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_INSERTION_FEATURES;
+ features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_FILTERING_FEATURES) {
+ if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
+ (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(wx_fix_features);
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
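
Each of the three blocks in wx_fix_features() applies the same rule: a CTAG/STAG feature pair must toggle together, otherwise the request falls back to the device's current setting for that pair. Generalized (helper name hypothetical):

#include <linux/netdevice.h>

static netdev_features_t keep_pair_coupled_example(struct net_device *dev,
                                                   netdev_features_t features,
                                                   netdev_features_t pair)
{
        netdev_features_t req = features & pair;

        /* partial request: revert the pair to its current state */
        if (req != 0 && req != pair) {
                features &= ~pair;
                features |= dev->features & pair;
        }

        return features;
}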
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index ec909e876..c41b29ea8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -30,6 +30,8 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b4dc4f341..5aaf7b1fa 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -982,8 +982,13 @@ struct wx_hw_stats {
u64 qmprc;
};
+enum wx_state {
+ WX_STATE_RESETTING,
+ WX_STATE_NBITS, /* must be last */
+};
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ DECLARE_BITMAP(state, WX_STATE_NBITS);
void *priv;
u8 __iomem *hw_addr;
@@ -1047,7 +1052,6 @@ struct wx {
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
- bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
@@ -1072,6 +1076,8 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+
+ void (*do_reset)(struct net_device *netdev);
};
#define WX_INTR_ALL (~0ULL)
@@ -1132,4 +1138,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config)
return container_of(config, struct wx, phylink_config);
}
+static inline int wx_set_state_reset(struct wx *wx)
+{
+ u8 timeout = 50;
+
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
#endif /* _WX_TYPE_H_ */
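
Callers bracket any reconfiguration with wx_set_state_reset() and clear_bit(), as the ethtool ring paths below do; a minimal sketch of that usage:

/* Serialize resets: a second caller spins for up to ~50-100 ms and
 * then backs off with -EBUSY.
 */
static int wx_reconfig_example(struct wx *wx)
{
        int err = wx_set_state_reset(wx);

        if (err)
                return err;

        /* ... bring the device down, resize rings, bring it back up ... */

        clear_bit(WX_STATE_RESETTING, wx->state);
        return 0;
}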
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 786a652ae..46a5a3e95 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
ngbe_down(wx);
@@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx_configure(wx);
ngbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int ngbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index fdd6b4f70..e894e01d0 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -499,6 +499,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 7507f762e..427188752 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
+ txgbe_irq.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index db675512c..31fde3fa7 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -19,7 +19,7 @@ static int txgbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -31,6 +31,10 @@ static int txgbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -39,14 +43,16 @@ static int txgbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
txgbe_down(wx);
@@ -55,7 +61,9 @@ static int txgbe_set_ringparam(struct net_device *netdev,
txgbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int txgbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
new file mode 100644
index 000000000..b3e3605d1
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/irqdomain.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_phy.h"
+#include "txgbe_irq.h"
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+void txgbe_irq_enable(struct wx *wx, bool queues)
+{
+ wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+}
+
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link (maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+int txgbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
+static int txgbe_request_link_irq(struct txgbe *txgbe)
+{
+ txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ return request_threaded_irq(txgbe->link_irq, NULL,
+ txgbe_link_irq_handler,
+ IRQF_ONESHOT, "txgbe-link-irq", txgbe);
+}
+
+static const struct irq_chip txgbe_irq_chip = {
+ .name = "txgbe-misc-irq",
+};
+
+static int txgbe_misc_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct txgbe *txgbe = d->host_data;
+
+ irq_set_chip_data(irq, txgbe);
+ irq_set_chip(irq, &txgbe->misc.chip);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ .map = txgbe_misc_irq_domain_map,
+};
+
+static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void txgbe_del_irq_domain(struct txgbe *txgbe)
+{
+ int hwirq, virq;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
+ virq = irq_find_mapping(txgbe->misc.domain, hwirq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(txgbe->misc.domain);
+}
+
+void txgbe_free_misc_irq(struct txgbe *txgbe)
+{
+ free_irq(txgbe->gpio_irq, txgbe);
+ free_irq(txgbe->link_irq, txgbe);
+ free_irq(txgbe->misc.irq, txgbe);
+ txgbe_del_irq_domain(txgbe);
+}
+
+int txgbe_setup_misc_irq(struct txgbe *txgbe)
+{
+ struct wx *wx = txgbe->wx;
+ int hwirq, err;
+
+ txgbe->misc.nirqs = 2;
+ txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
+ if (!txgbe->misc.domain)
+ return -ENOMEM;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
+ irq_create_mapping(txgbe->misc.domain, hwirq);
+
+ txgbe->misc.chip = txgbe_irq_chip;
+ if (wx->pdev->msix_enabled)
+ txgbe->misc.irq = wx->msix_entry->vector;
+ else
+ txgbe->misc.irq = wx->pdev->irq;
+
+ err = request_threaded_irq(txgbe->misc.irq, NULL,
+ txgbe_misc_irq_handle,
+ IRQF_ONESHOT,
+ wx->netdev->name, txgbe);
+ if (err)
+ goto del_misc_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_misc_irq;
+
+ err = txgbe_request_link_irq(txgbe);
+ if (err)
+ goto free_gpio_irq;
+
+ return 0;
+
+free_gpio_irq:
+ free_irq(txgbe->gpio_irq, txgbe);
+free_misc_irq:
+ free_irq(txgbe->misc.irq, txgbe);
+del_misc_irq:
+ txgbe_del_irq_domain(txgbe);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
new file mode 100644
index 000000000..b77945e7a
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+void txgbe_irq_enable(struct wx *wx, bool queues);
+int txgbe_request_irq(struct wx *wx);
+void txgbe_free_misc_irq(struct txgbe *txgbe);
+int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3b151c410..8c7a74981 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -17,6 +17,7 @@
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_irq.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -76,137 +77,11 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-/**
- * txgbe_irq_enable - Enable default interrupt generation settings
- * @wx: pointer to private structure
- * @queues: enable irqs for queues
- **/
-static void txgbe_irq_enable(struct wx *wx, bool queues)
-{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
-
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
- if (queues)
- wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
-}
-
-/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
- * @wx: board private structure
- *
- * Allocate MSI-X vectors and request interrupts from the kernel.
- **/
-static int txgbe_request_msix_irqs(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- int vector, err;
-
- for (vector = 0; vector < wx->num_q_vectors; vector++) {
- struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_q_entries[vector];
-
- if (q_vector->tx.ring && q_vector->rx.ring)
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-TxRx-%d", netdev->name, entry->entry);
- else
- /* skip this unused q_vector */
- continue;
-
- err = request_irq(entry->vector, wx_msix_clean_rings, 0,
- q_vector->name, q_vector);
- if (err) {
- wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
- q_vector->name, err);
- goto free_queue_irqs;
- }
- }
-
- return 0;
-
-free_queue_irqs:
- while (vector) {
- vector--;
- free_irq(wx->msix_q_entries[vector].vector,
- wx->q_vector[vector]);
- }
- wx_reset_interrupt_capability(wx);
- return err;
-}
-
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -394,6 +269,8 @@ static int txgbe_sw_init(struct wx *wx)
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->do_reset = txgbe_do_reset;
+
return 0;
}
@@ -518,6 +395,7 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -528,6 +406,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
+ txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -536,6 +415,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
+ txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -543,6 +423,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
return 0;
}
+static void txgbe_reinit_locked(struct wx *wx)
+{
+ int err = 0;
+
+ netif_trans_update(wx->netdev);
+
+ err = wx_set_state_reset(wx);
+ if (err) {
+ wx_err(wx, "wait device reset timeout\n");
+ return;
+ }
+
+ txgbe_down(wx);
+ txgbe_up(wx);
+
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+void txgbe_do_reset(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ txgbe_reinit_locked(wx);
+ else
+ txgbe_reset(wx);
+}
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -550,6 +458,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -751,10 +660,14 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
- err = txgbe_init_phy(txgbe);
+ err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
+ err = txgbe_init_phy(txgbe);
+ if (err)
+ goto err_free_misc_irq;
+
err = register_netdev(netdev);
if (err)
goto err_remove_phy;
@@ -781,6 +694,8 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
+err_free_misc_irq:
+ txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
@@ -813,6 +728,7 @@ static void txgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
+ txgbe_free_misc_irq(txgbe);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 8cddc9ddb..93295916b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,8 +20,6 @@
#include "txgbe_phy.h"
#include "txgbe_hw.h"
-#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
-
static int txgbe_swnodes_register(struct txgbe *txgbe)
{
struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -294,6 +292,21 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
return 0;
}
+irqreturn_t txgbe_link_irq_handler(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+ bool up;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
+
+ phylink_mac_change(wx->phylink, up);
+
+ return IRQ_HANDLED;
+}
+
static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct wx *wx = gpiochip_get_data(chip);
@@ -439,7 +452,7 @@ static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
}
static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe_gpio_irq",
+ .name = "txgbe-gpio-irq",
.irq_ack = txgbe_gpio_irq_ack,
.irq_mask = txgbe_gpio_irq_mask,
.irq_unmask = txgbe_gpio_irq_unmask,
@@ -448,29 +461,25 @@ static const struct irq_chip txgbe_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static void txgbe_irq_handler(struct irq_desc *desc)
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct wx *wx = irq_desc_get_handler_data(desc);
- struct txgbe *txgbe = wx->priv;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
irq_hw_number_t hwirq;
unsigned long gpioirq;
struct gpio_chip *gc;
unsigned long flags;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
-
- chained_irq_enter(chip, desc);
gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
gc = txgbe->gpio;
for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
u32 irq_type = irq_get_trigger_type(gpio);
- generic_handle_domain_irq(gc->irq.domain, hwirq);
+ txgbe_gpio_irq_ack(d);
+ handle_nested_irq(gpio);
if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
raw_spin_lock_irqsave(&wx->gpio_lock, flags);
@@ -479,17 +488,34 @@ static void txgbe_irq_handler(struct irq_desc *desc)
}
}
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
+}
+
+void txgbe_reinit_gpio_intr(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+ irq_hw_number_t hwirq;
+ unsigned long gpioirq;
+ struct gpio_chip *gc;
+ unsigned long flags;
+
+ /* replay any GPIO interrupt left pending before the IRQ was enabled */
+ gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
- TXGBE_PX_MISC_ETH_AN)) {
- u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
+ gc = txgbe->gpio;
+ for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+ int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
+ u32 irq_type = irq_get_trigger_type(gpio);
- phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
- }
+ txgbe_gpio_irq_ack(d);
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+ txgbe_toggle_trigger(gc, hwirq);
+ raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+ }
+ }
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -526,19 +552,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->parent_handler = txgbe_irq_handler;
- girq->parent_handler_data = wx;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents), GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
-
- /* now only suuported on MSI-X interrupt */
- if (!wx->msix_entry)
- return -EPERM;
-
- girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -558,8 +571,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "%s.%d",
- TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
+ snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
+ pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -621,7 +634,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
- info.name = TXGBE_I2C_CLK_DEV_NAME;
+ info.name = "i2c_designware";
info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
@@ -756,8 +769,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
goto err_unregister_i2c;
}
- wx->msix_in_use = true;
-
return 0;
err_unregister_i2c:
@@ -790,5 +801,4 @@ void txgbe_remove_phy(struct txgbe *txgbe)
phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
- txgbe->wx->msix_in_use = false;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 1ab592124..8a026d804 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,6 +4,9 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
+void txgbe_reinit_gpio_intr(struct wx *wx);
+irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 270a6fd9a..f434a7865 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -5,6 +5,7 @@
#define _TXGBE_TYPE_H_
#include <linux/property.h>
+#include <linux/irq.h>
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -133,6 +134,7 @@ extern char txgbe_driver_name[];
void txgbe_down(struct wx *wx);
void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
+void txgbe_do_reset(struct net_device *netdev);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
@@ -169,15 +171,31 @@ struct txgbe_nodes {
const struct software_node *group[SWNODE_MAX + 1];
};
+enum txgbe_misc_irqs {
+ TXGBE_IRQ_GPIO = 0,
+ TXGBE_IRQ_LINK,
+ TXGBE_IRQ_MAX
+};
+
+struct txgbe_irq {
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq;
+};
+
struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
+ struct txgbe_irq misc;
struct dw_xpcs *xpcs;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
+ unsigned int gpio_irq;
+ unsigned int link_irq;
};
#endif /* _TXGBE_TYPE_H_ */