From 76047c0312414915035f4ebbaa533fe3817a21e0 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Mon, 3 Jun 2024 07:08:49 +0200
Subject: Adding upstream version 6.8.12.

Signed-off-by: Daniel Baumann
---
 drivers/net/ethernet/intel/ice/ice_base.c | 134 +++++++++++++++++++++++++++++-
 1 file changed, 132 insertions(+), 2 deletions(-)

(limited to 'drivers/net/ethernet/intel/ice/ice_base.c')

diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c979192e44..a545a7917e 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -536,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
  *
  * Return 0 on success and a negative value on error.
  */
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 {
 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -631,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	return 0;
 }
 
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+	if (q_idx >= vsi->num_rxq)
+		return -EINVAL;
+
+	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+		vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+	} else {
+		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+		vsi->rx_buf_len = ICE_RXBUF_3072;
+	}
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+	u16 i;
+
+	if (vsi->type == ICE_VSI_VF)
+		goto setup_rings;
+
+	ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+	/* set up individual rings */
+	ice_for_each_rxq(vsi, i) {
+		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 /**
  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -826,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  * @ring: Tx ring to be configured
  * @qg_buf: queue group buffer
  */
-int
+static int
 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 		struct ice_aqc_add_tx_qgrp *qg_buf)
 {
@@ -897,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 	return 0;
 }
 
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+			   u16 q_idx)
+{
+	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+		return -EINVAL;
+
+	qg_buf->num_txqs = 1;
+
+	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+	int err = 0;
+	u16 q_idx;
+
+	qg_buf->num_txqs = 1;
+
+	for (q_idx = 0; q_idx < count; q_idx++) {
+		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+	int ret;
+	int i;
+
+	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+	if (ret)
+		return ret;
+
+	ice_for_each_rxq(vsi, i)
+		ice_tx_xsk_pool(vsi, i);
+
+	return 0;
+}
+
 /**
  * ice_cfg_itr - configure the initial interrupt throttle values
  * @hw: pointer to the HW structure
--
cgit v1.2.3
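Usage sketch (not part of the patch above): the per-queue helpers this change exports, ice_vsi_cfg_single_txq() and ice_vsi_cfg_single_rxq(), are the kind of entry points a caller would use to reprogram a single Tx/Rx queue pair after it has been stopped, for example around an AF_XDP socket attach. The wrapper below is hypothetical (the name ice_qp_reconfigure_sketch is not in the driver) and omits the quiesce/restart and interrupt handling a real caller needs.

/* Hypothetical sketch, not from the patch: reprogram one queue pair
 * using the helpers exported above. The surrounding stop/start steps
 * are intentionally omitted.
 */
static int ice_qp_reconfigure_sketch(struct ice_vsi *vsi, u16 q_idx)
{
	int err;

	/* rewrite the Tx queue context for the LAN Tx ring at q_idx */
	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (err)
		return err;

	/* rewrite the matching Rx queue context and post its Rx buffers */
	return ice_vsi_cfg_single_rxq(vsi, q_idx);
}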