author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000
commit    | 483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree      | e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/dpdk/examples/qos_sched/app_thread.c
parent    | Initial commit. (diff)
download  | ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz, ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip
Adding upstream version 14.2.21. (tags: upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/examples/qos_sched/app_thread.c')
-rw-r--r-- | src/spdk/dpdk/examples/qos_sched/app_thread.c | 264
1 file changed, 264 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/examples/qos_sched/app_thread.c b/src/spdk/dpdk/examples/qos_sched/app_thread.c
new file mode 100644
index 00000000..a5927423
--- /dev/null
+++ b/src/spdk/dpdk/examples/qos_sched/app_thread.c
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_sched.h>
+
+#include "main.h"
+
+/*
+ * QoS parameters are encoded as follows:
+ *     Outer VLAN ID defines subport
+ *     Inner VLAN ID defines pipe
+ *     Destination IP 0.0.XXX.0 defines traffic class
+ *     Destination IP host (0.0.0.XXX) defines queue
+ * Values below define the offset (in 16-bit words) to each field from the
+ * start of the frame.
+ */
+#define SUBPORT_OFFSET 7
+#define PIPE_OFFSET    9
+#define TC_OFFSET      20
+#define QUEUE_OFFSET   20
+#define COLOR_OFFSET   19
+
+static inline int
+get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
+        uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
+{
+    uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
+
+    *subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
+            (port_params.n_subports_per_port - 1); /* Outer VLAN ID */
+    *pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
+            (port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
+    *traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
+            (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
+    *queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
+            (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1); /* Destination IP */
+    *color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */
+
+    return 0;
+}
+
+void
+app_rx_thread(struct thread_conf **confs)
+{
+    uint32_t i, nb_rx;
+    struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
+    struct thread_conf *conf;
+    int conf_idx = 0;
+
+    uint32_t subport;
+    uint32_t pipe;
+    uint32_t traffic_class;
+    uint32_t queue;
+    uint32_t color;
+
+    while ((conf = confs[conf_idx])) {
+        nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
+                burst_conf.rx_burst);
+
+        if (likely(nb_rx != 0)) {
+            APP_STATS_ADD(conf->stat.nb_rx, nb_rx);
+
+            for (i = 0; i < nb_rx; i++) {
+                get_pkt_sched(rx_mbufs[i],
+                        &subport, &pipe, &traffic_class, &queue, &color);
+                rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
+                        traffic_class, queue, (enum rte_meter_color) color);
+            }
+
+            if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
+                    (void **)rx_mbufs, nb_rx, NULL) == 0)) {
+                for (i = 0; i < nb_rx; i++) {
+                    rte_pktmbuf_free(rx_mbufs[i]);
+
+                    APP_STATS_ADD(conf->stat.nb_drop, 1);
+                }
+            }
+        }
+        conf_idx++;
+        if (confs[conf_idx] == NULL)
+            conf_idx = 0;
+    }
+}
+
+/* Send a burst of packets to the output interface. Packets cannot be
+ * dropped at this point, so the function retries until the whole burst
+ * has been transmitted.
+ */
+static inline void
+app_send_burst(struct thread_conf *qconf)
+{
+    struct rte_mbuf **mbufs;
+    uint32_t n, ret;
+
+    mbufs = (struct rte_mbuf **)qconf->m_table;
+    n = qconf->n_mbufs;
+
+    do {
+        ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs,
+                (uint16_t)n);
+        /* we cannot drop the packets, so re-send */
+        /* update number of packets to be sent */
+        n -= ret;
+        mbufs = (struct rte_mbuf **)&mbufs[ret];
+    } while (n);
+}
+
+/* Buffer packets for the output interface, flushing whenever a full TX
+ * burst has accumulated */
+static void
+app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs,
+        uint32_t nb_pkt)
+{
+    uint32_t i, len;
+
+    len = qconf->n_mbufs;
+    for (i = 0; i < nb_pkt; i++) {
+        qconf->m_table[len] = mbufs[i];
+        len++;
+        /* enough pkts to be sent */
+        if (unlikely(len == burst_conf.tx_burst)) {
+            qconf->n_mbufs = len;
+            app_send_burst(qconf);
+            len = 0;
+        }
+    }
+
+    qconf->n_mbufs = len;
+}
+
+void
+app_tx_thread(struct thread_conf **confs)
+{
+    struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
+    struct thread_conf *conf;
+    int conf_idx = 0;
+    int retval;
+    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+            US_PER_S * BURST_TX_DRAIN_US;
+
+    while ((conf = confs[conf_idx])) {
+        retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
+                burst_conf.qos_dequeue, NULL);
+        if (likely(retval != 0)) {
+            app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
+
+            conf->counter = 0; /* reset empty read loop counter */
+        }
+
+        conf->counter++;
+
+        /* drain ring and TX queues */
+        if (unlikely(conf->counter > drain_tsc)) {
+            /* check if there are any packets left to be transmitted */
+            if (conf->n_mbufs != 0) {
+                app_send_burst(conf);
+
+                conf->n_mbufs = 0;
+            }
+            conf->counter = 0;
+        }
+
+        conf_idx++;
+        if (confs[conf_idx] == NULL)
+            conf_idx = 0;
+    }
+}
+
+void
+app_worker_thread(struct thread_conf **confs)
+{
+    struct rte_mbuf *mbufs[burst_conf.ring_burst];
+    struct thread_conf *conf;
+    int conf_idx = 0;
+
+    while ((conf = confs[conf_idx])) {
+        uint32_t nb_pkt;
+
+        /* Read packets from the ring */
+        nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
+                burst_conf.ring_burst, NULL);
+        if (likely(nb_pkt)) {
+            int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
+                    nb_pkt);
+
+            APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
+            APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
+        }
+
+        nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
+                burst_conf.qos_dequeue);
+        if (likely(nb_pkt > 0))
+            while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+                    (void **)mbufs, nb_pkt, NULL) == 0)
+                ; /* empty body */
+
+        conf_idx++;
+        if (confs[conf_idx] == NULL)
+            conf_idx = 0;
+    }
+}
+
+void
+app_mixed_thread(struct thread_conf **confs)
+{
+    struct rte_mbuf *mbufs[burst_conf.ring_burst];
+    struct thread_conf *conf;
+    int conf_idx = 0;
+    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+            US_PER_S * BURST_TX_DRAIN_US;
+
+    while ((conf = confs[conf_idx])) {
+        uint32_t nb_pkt;
+
+        /* Read packets from the ring */
+        nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
+                burst_conf.ring_burst, NULL);
+        if (likely(nb_pkt)) {
+            int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
+                    nb_pkt);
+
+            APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
+            APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
+        }
+
+        nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
+                burst_conf.qos_dequeue);
+        if (likely(nb_pkt > 0)) {
+            app_send_packets(conf, mbufs, nb_pkt);
+
+            conf->counter = 0; /* reset empty read loop counter */
+        }
+
+        conf->counter++;
+
+        /* drain ring and TX queues */
+        if (unlikely(conf->counter > drain_tsc)) {
+            /* check if there are any packets left to be transmitted */
+            if (conf->n_mbufs != 0) {
+                app_send_burst(conf);
+
+                conf->n_mbufs = 0;
+            }
+            conf->counter = 0;
+        }
+
+        conf_idx++;
+        if (confs[conf_idx] == NULL)
+            conf_idx = 0;
+    }
+}
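Editor's note: the field-to-hierarchy mapping in get_pkt_sched() is compact and easy to misread, so here is a minimal standalone sketch of the same decoding. It is hypothetical and not part of this commit: it needs no DPDK, assumes the QinQ Ethernet + IPv4 frame layout implied by the offsets above (outer VLAN TCI at bytes 14-15, inner TCI at bytes 18-19, destination IP at bytes 38-41), a little-endian host (the original reads pdata[QUEUE_OFFSET] with no byte swap), and stand-in pool sizes: N_SUBPORTS, N_PIPES, N_TCS and N_QUEUES below are assumed values, with 4 traffic classes and 4 queues per class as in the DPDK sched library of this vintage.

/* hypothetical sketch: decode subport/pipe/tc/queue the way get_pkt_sched()
 * does, from a hand-built frame; compiles with any C compiler, no DPDK */
#include <stdint.h>
#include <stdio.h>

#define N_SUBPORTS 1    /* assumed stand-in for port_params.n_subports_per_port */
#define N_PIPES    4096 /* assumed stand-in for port_params.n_pipes_per_subport */
#define N_TCS      4    /* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE (assumed) */
#define N_QUEUES   4    /* RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS (assumed) */

int main(void)
{
    uint8_t frame[42] = {0};

    /* outer VLAN TCI (bytes 14-15, big-endian): VLAN ID 5 -> subport */
    frame[14] = 0x00; frame[15] = 0x05;
    /* inner VLAN TCI (bytes 18-19, big-endian): VLAN ID 99 -> pipe */
    frame[18] = 0x00; frame[19] = 0x63;
    /* destination IP 10.0.2.3 (bytes 38-41): 0.0.XXX.0 -> traffic class,
     * 0.0.0.XXX -> queue */
    frame[38] = 10; frame[39] = 0; frame[40] = 2; frame[41] = 3;

    /* rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) reads bytes 14-15 as
     * big-endian; the 0x0FFF mask keeps the 12-bit VLAN ID */
    uint32_t subport = (((frame[14] << 8) | frame[15]) & 0x0FFF) & (N_SUBPORTS - 1);
    uint32_t pipe = (((frame[18] << 8) | frame[19]) & 0x0FFF) & (N_PIPES - 1);
    /* pdata[QUEUE_OFFSET] & 0x0F on a little-endian host is the low nibble
     * of frame byte 40; (pdata[QUEUE_OFFSET] >> 8) & 0x0F is byte 41 */
    uint32_t tc = (frame[40] & 0x0F) & (N_TCS - 1);
    uint32_t queue = (frame[41] & 0x0F) & (N_QUEUES - 1);

    printf("subport=%u pipe=%u tc=%u queue=%u\n", subport, pipe, tc, queue);
    return 0; /* prints: subport=0 pipe=99 tc=2 queue=3 */
}

With these inputs the sketch prints subport=0 (masked to zero because a single subport is assumed), pipe=99, tc=2, queue=3. One further reading note on the TX drain logic: drain_tsc converts BURST_TX_DRAIN_US microseconds into TSC cycles, but conf->counter is incremented once per empty loop iteration rather than once per cycle, so the comparison is a coarse "idle long enough" heuristic rather than an exact timer.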