author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/net/virtio_net.c
parent     Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--   drivers/net/virtio_net.c   350
1 file changed, 285 insertions(+), 65 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1caf21fd50..ec14bf2a9a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/kernel.h>
+#include <linux/dim.h>
 #include <net/route.h>
 #include <net/xdp.h>
 #include <net/net_failover.h>
@@ -172,6 +173,17 @@ struct receive_queue {
 	struct virtnet_rq_stats stats;

+	/* The number of rx notifications */
+	u16 calls;
+
+	/* Is dynamic interrupt moderation enabled? */
+	bool dim_enabled;
+
+	/* Dynamic Interrupt Moderation */
+	struct dim dim;
+
+	u32 packets_in_napi;
+
 	struct virtnet_interrupt_coalesce intr_coal;

 	/* Chain pages by the private ptr. */
@@ -305,6 +317,9 @@ struct virtnet_info {
 	u8 duplex;
 	u32 speed;

+	/* Is rx dynamic interrupt moderation enabled? */
+	bool rx_dim_enabled;
+
 	/* Interrupt coalescing settings */
 	struct virtnet_interrupt_coalesce intr_coal_tx;
 	struct virtnet_interrupt_coalesce intr_coal_rx;
@@ -441,7 +456,7 @@ static void virtqueue_napi_schedule(struct napi_struct *napi,
 	}
 }

-static void virtqueue_napi_complete(struct napi_struct *napi,
+static bool virtqueue_napi_complete(struct napi_struct *napi,
 				    struct virtqueue *vq, int processed)
 {
 	int opaque;
@@ -450,9 +465,13 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
 	if (napi_complete_done(napi, processed)) {
 		if (unlikely(virtqueue_poll(vq, opaque)))
 			virtqueue_napi_schedule(napi, vq);
+		else
+			return true;
 	} else {
 		virtqueue_disable_cb(vq);
 	}
+
+	return false;
 }

 static void skb_xmit_done(struct virtqueue *vq)
@@ -2010,6 +2029,7 @@ static void skb_recv_done(struct virtqueue *rvq)
 	struct virtnet_info *vi = rvq->vdev->priv;
 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

+	rq->calls++;
 	virtqueue_napi_schedule(&rq->napi, rvq);
 }

@@ -2150,6 +2170,24 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 	}
 }

+static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
+{
+	struct dim_sample cur_sample = {};
+
+	if (!rq->packets_in_napi)
+		return;
+
+	u64_stats_update_begin(&rq->stats.syncp);
+	dim_update_sample(rq->calls,
+			  u64_stats_read(&rq->stats.packets),
+			  u64_stats_read(&rq->stats.bytes),
+			  &cur_sample);
+	u64_stats_update_end(&rq->stats.syncp);
+
+	net_dim(&rq->dim, cur_sample);
+	rq->packets_in_napi = 0;
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct receive_queue *rq =
@@ -2158,17 +2196,22 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	struct send_queue *sq;
 	unsigned int received;
 	unsigned int xdp_xmit = 0;
+	bool napi_complete;

 	virtnet_poll_cleantx(rq);

 	received = virtnet_receive(rq, budget, &xdp_xmit);
+	rq->packets_in_napi += received;

 	if (xdp_xmit & VIRTIO_XDP_REDIR)
 		xdp_do_flush();

 	/* Out of packets? */
-	if (received < budget)
-		virtqueue_napi_complete(napi, rq->vq, received);
+	if (received < budget) {
+		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+		if (napi_complete && rq->dim_enabled)
+			virtnet_rx_dim_update(vi, rq);
+	}

 	if (xdp_xmit & VIRTIO_XDP_TX) {
 		sq = virtnet_xdp_get_sq(vi);
@@ -2239,8 +2282,11 @@ err_enable_qp:
 	disable_delayed_refill(vi);
 	cancel_delayed_work_sync(&vi->refill);

-	for (i--; i >= 0; i--)
+	for (i--; i >= 0; i--) {
 		virtnet_disable_queue_pair(vi, i);
+		cancel_work_sync(&vi->rq[i].dim.work);
+	}
+
 	return err;
 }

@@ -2402,8 +2448,10 @@ static int virtnet_rx_resize(struct virtnet_info *vi,

 	qindex = rq - vi->rq;

-	if (running)
+	if (running) {
 		napi_disable(&rq->napi);
+		cancel_work_sync(&rq->dim.work);
+	}

 	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
 	if (err)
@@ -2650,8 +2698,10 @@ static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);

-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		virtnet_disable_queue_pair(vi, i);
+		cancel_work_sync(&vi->rq[i].dim.work);
+	}

 	return 0;
 }
@@ -2858,6 +2908,58 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
 					    &vi->node_dead);
 }

+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+					 u16 vqn, u32 max_usecs, u32 max_packets)
+{
+	struct scatterlist sgs;
+
+	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
+	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
+	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
+	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
+				  &sgs))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+					    u16 queue, u32 max_usecs,
+					    u32 max_packets)
+{
+	int err;
+
+	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+					    max_usecs, max_packets);
+	if (err)
+		return err;
+
+	vi->rq[queue].intr_coal.max_usecs = max_usecs;
+	vi->rq[queue].intr_coal.max_packets = max_packets;
+
+	return 0;
+}
+
+static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+					    u16 queue, u32 max_usecs,
+					    u32 max_packets)
+{
+	int err;
+
+	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+					    max_usecs, max_packets);
+	if (err)
+		return err;
+
+	vi->sq[queue].intr_coal.max_usecs = max_usecs;
+	vi->sq[queue].intr_coal.max_packets = max_packets;
+
+	return 0;
+}
+
 static void virtnet_get_ringparam(struct net_device *dev,
 				  struct ethtool_ringparam *ring,
 				  struct kernel_ethtool_ringparam *kernel_ring,
@@ -2871,9 +2973,6 @@ static void virtnet_get_ringparam(struct net_device *dev,
 	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
 }

-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
-					 u16 vqn, u32 max_usecs, u32 max_packets);
-
 static int virtnet_set_ringparam(struct net_device *dev,
 				 struct ethtool_ringparam *ring,
 				 struct kernel_ethtool_ringparam *kernel_ring,
@@ -2915,14 +3014,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
 			 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
 			 * did not set any TX coalescing parameters, to 0.
 			 */
-			err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
-							    vi->intr_coal_tx.max_usecs,
-							    vi->intr_coal_tx.max_packets);
+			err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
+							       vi->intr_coal_tx.max_usecs,
+							       vi->intr_coal_tx.max_packets);
 			if (err)
 				return err;
-
-			vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
-			vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
 		}

 		if (ring->rx_pending != rx_pending) {
@@ -2931,14 +3027,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
 				return err;

 			/* The reason is same as the transmit virtqueue reset */
-			err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
-							    vi->intr_coal_rx.max_usecs,
-							    vi->intr_coal_rx.max_packets);
+			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
+							       vi->intr_coal_rx.max_usecs,
+							       vi->intr_coal_rx.max_packets);
 			if (err)
 				return err;
-
-			vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
-			vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
 		}
 	}

@@ -3275,10 +3368,10 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
 	return 0;
 }

-static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
-				       struct ethtool_coalesce *ec)
+static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
+					  struct ethtool_coalesce *ec)
 {
-	struct scatterlist sgs_tx, sgs_rx;
+	struct scatterlist sgs_tx;
 	int i;

 	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
@@ -3290,7 +3383,6 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
 				  &sgs_tx))
 		return -EINVAL;

-	/* Save parameters */
 	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
 	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
 	for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3298,6 +3390,40 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
 		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
 	}

+	return 0;
+}
+
+static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
+					  struct ethtool_coalesce *ec)
+{
+	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
+	struct scatterlist sgs_rx;
+	int i;
+
+	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+		return -EOPNOTSUPP;
+
+	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
+			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
+		return -EINVAL;
+
+	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
+		vi->rx_dim_enabled = true;
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			vi->rq[i].dim_enabled = true;
+		return 0;
+	}
+
+	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
+		vi->rx_dim_enabled = false;
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			vi->rq[i].dim_enabled = false;
+	}
+
+	/* Since the per-queue coalescing params can be set,
+	 * we need apply the global new params even if they
+	 * are not updated.
+	 */
 	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
 	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
 	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
@@ -3307,7 +3433,6 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
 				  &sgs_rx))
 		return -EINVAL;

-	/* Save parameters */
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
 	for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3318,21 +3443,55 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
 	return 0;
 }

-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
-					 u16 vqn, u32 max_usecs, u32 max_packets)
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+				       struct ethtool_coalesce *ec)
 {
-	struct scatterlist sgs;
+	int err;

-	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
-	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
-	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
-	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+	err = virtnet_send_tx_notf_coal_cmds(vi, ec);
+	if (err)
+		return err;

-	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
-				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
-				  &sgs))
+	err = virtnet_send_rx_notf_coal_cmds(vi, ec);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
+					     struct ethtool_coalesce *ec,
+					     u16 queue)
+{
+	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
+	bool cur_rx_dim = vi->rq[queue].dim_enabled;
+	u32 max_usecs, max_packets;
+	int err;
+
+	max_usecs = vi->rq[queue].intr_coal.max_usecs;
+	max_packets = vi->rq[queue].intr_coal.max_packets;
+
+	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
+			       ec->rx_max_coalesced_frames != max_packets))
 		return -EINVAL;

+	if (rx_ctrl_dim_on && !cur_rx_dim) {
+		vi->rq[queue].dim_enabled = true;
+		return 0;
+	}
+
+	if (!rx_ctrl_dim_on && cur_rx_dim)
+		vi->rq[queue].dim_enabled = false;
+
+	/* If no params are updated, userspace ethtool will
+	 * reject the modification.
+	 */
+	err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
+					       ec->rx_coalesce_usecs,
+					       ec->rx_max_coalesced_frames);
+	if (err)
+		return err;
+
 	return 0;
 }

@@ -3342,27 +3501,62 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
 {
 	int err;

-	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
-					    ec->rx_coalesce_usecs,
-					    ec->rx_max_coalesced_frames);
+	err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
 	if (err)
 		return err;

-	vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-	vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-
-	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
-					    ec->tx_coalesce_usecs,
-					    ec->tx_max_coalesced_frames);
+	err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
+					       ec->tx_coalesce_usecs,
+					       ec->tx_max_coalesced_frames);
 	if (err)
 		return err;

-	vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
-	vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
-
 	return 0;
 }

+static void virtnet_rx_dim_work(struct work_struct *work)
+{
+	struct dim *dim = container_of(work, struct dim, work);
+	struct receive_queue *rq = container_of(dim,
+			struct receive_queue, dim);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	struct net_device *dev = vi->dev;
+	struct dim_cq_moder update_moder;
+	int i, qnum, err;
+
+	if (!rtnl_trylock())
+		return;
+
+	/* Each rxq's work is queued by "net_dim()->schedule_work()"
+	 * in response to NAPI traffic changes. Note that dim->profile_ix
+	 * for each rxq is updated prior to the queuing action.
+	 * So we only need to traverse and update profiles for all rxqs
+	 * in the work which is holding rtnl_lock.
+	 */
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		rq = &vi->rq[i];
+		dim = &rq->dim;
+		qnum = rq - vi->rq;
+
+		if (!rq->dim_enabled)
+			continue;
+
+		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+		if (update_moder.usec != rq->intr_coal.max_usecs ||
+		    update_moder.pkts != rq->intr_coal.max_packets) {
+			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+							       update_moder.usec,
+							       update_moder.pkts);
+			if (err)
+				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+					 dev->name, qnum);
+			dim->state = DIM_START_MEASURE;
+		}
+	}
+
+	rtnl_unlock();
+}
+
 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
 {
 	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
@@ -3444,6 +3638,7 @@ static int virtnet_get_coalesce(struct net_device *dev,
 		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
 		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
 		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
+		ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
 	} else {
 		ec->rx_max_coalesced_frames = 1;
@@ -3501,6 +3696,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
 		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
+		ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
 	} else {
 		ec->rx_max_coalesced_frames = 1;
@@ -3548,41 +3744,60 @@ static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
 	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
 }

-static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int virtnet_get_rxfh(struct net_device *dev,
+			    struct ethtool_rxfh_param *rxfh)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;

-	if (indir) {
+	if (rxfh->indir) {
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			indir[i] = vi->ctrl->rss.indirection_table[i];
+			rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
 	}

-	if (key)
-		memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+	if (rxfh->key)
+		memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);

-	if (hfunc)
-		*hfunc = ETH_RSS_HASH_TOP;
+	rxfh->hfunc = ETH_RSS_HASH_TOP;

 	return 0;
 }

-static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+static int virtnet_set_rxfh(struct net_device *dev,
+			    struct ethtool_rxfh_param *rxfh,
+			    struct netlink_ext_ack *extack)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	bool update = false;
 	int i;

-	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    rxfh->hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;

-	if (indir) {
+	if (rxfh->indir) {
+		if (!vi->has_rss)
+			return -EOPNOTSUPP;
+
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			vi->ctrl->rss.indirection_table[i] = indir[i];
+			vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+		update = true;
 	}

-	if (key)
-		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
-	virtnet_commit_rss_command(vi);
+	if (rxfh->key) {
+		/* If either _F_HASH_REPORT or _F_RSS are negotiated, the
+		 * device provides hash calculation capabilities, that is,
+		 * hash_key is configured.
+		 */
+		if (!vi->has_rss && !vi->has_rss_hash_report)
+			return -EOPNOTSUPP;
+
+		memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+		update = true;
+	}
+
+	if (update)
+		virtnet_commit_rss_command(vi);

 	return 0;
 }
@@ -3626,7 +3841,7 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)

 static const struct ethtool_ops virtnet_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
-		ETHTOOL_COALESCE_USECS,
+		ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
 	.get_drvinfo = virtnet_get_drvinfo,
 	.get_link = ethtool_op_get_link,
 	.get_ringparam = virtnet_get_ringparam,
@@ -4204,6 +4419,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 			       virtnet_poll_tx,
 			       napi_tx ? napi_weight : 0);

+		INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
+		vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
@@ -4484,13 +4702,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
 		vi->has_rss_hash_report = true;

-	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
 		vi->has_rss = true;

-	if (vi->has_rss || vi->has_rss_hash_report) {
 		vi->rss_indir_table_size =
 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
 				rss_max_indirection_table_length));
+	}
+
+	if (vi->has_rss || vi->has_rss_hash_report) {
 		vi->rss_key_size =
 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
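Not part of the commit itself, but for context: the coalescing changes above let userspace toggle adaptive (DIM-driven) RX moderation on a virtio-net device through the standard ethtool interface, since ETHTOOL_COALESCE_USE_ADAPTIVE_RX is now advertised in virtnet_ethtool_ops. The sketch below is a hypothetical userspace illustration of that path using the SIOCETHTOOL ioctl; the interface name "eth0" is an assumption, and the same effect is normally achieved with "ethtool -C eth0 adaptive-rx on".

/* Hypothetical illustration (not part of the patch): enable adaptive RX
 * coalescing from userspace. The driver then drives per-queue moderation
 * from virtnet_rx_dim_work(). "eth0" is an assumed interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd, ret = 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecoal;

	/* Read the current coalescing parameters first. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		goto out;
	}

	/* Flip only the adaptive-rx bit and write the settings back. */
	ecoal.cmd = ETHTOOL_SCOALESCE;
	ecoal.use_adaptive_rx_coalesce = 1;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SCOALESCE");
		goto out;
	}

	printf("adaptive-rx enabled on %s\n", ifr.ifr_name);
	ret = 0;
out:
	close(fd);
	return ret;
}

Reading the current parameters before writing them back matters here: virtnet_send_rx_notf_coal_cmds() returns -EINVAL if adaptive-rx is requested together with changed rx-usecs/rx-frames values. ETHTOOL_SCOALESCE also requires CAP_NET_ADMIN.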