Diffstat (limited to 'src/spdk/dpdk/lib/librte_kni')
-rw-r--r--  src/spdk/dpdk/lib/librte_kni/Makefile            |  20
-rw-r--r--  src/spdk/dpdk/lib/librte_kni/meson.build         |  12
-rw-r--r--  src/spdk/dpdk/lib/librte_kni/rte_kni.c           | 830
-rw-r--r--  src/spdk/dpdk/lib/librte_kni/rte_kni.h           | 270
-rw-r--r--  src/spdk/dpdk/lib/librte_kni/rte_kni_fifo.h      | 117
-rw-r--r--  src/spdk/dpdk/lib/librte_kni/rte_kni_version.map |  23
6 files changed, 1272 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_kni/Makefile b/src/spdk/dpdk/lib/librte_kni/Makefile new file mode 100644 index 000000000..9d440aa13 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_kni/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_kni.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing +LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev + +EXPORT_MAP := rte_kni_version.map + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_KNI) := rte_kni.c + +# install includes +SYMLINK-$(CONFIG_RTE_LIBRTE_KNI)-include := rte_kni.h + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/lib/librte_kni/meson.build b/src/spdk/dpdk/lib/librte_kni/meson.build new file mode 100644 index 000000000..425989d07 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_kni/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on 64-bit linux' +endif +sources = files('rte_kni.c') +headers = files('rte_kni.h') +deps += ['ethdev', 'pci'] +build = false +reason = 'not needed by SPDK' diff --git a/src/spdk/dpdk/lib/librte_kni/rte_kni.c b/src/spdk/dpdk/lib/librte_kni/rte_kni.c new file mode 100644 index 000000000..bcf82cc2d --- /dev/null +++ b/src/spdk/dpdk/lib/librte_kni/rte_kni.c @@ -0,0 +1,830 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef RTE_EXEC_ENV_LINUX +#error "KNI is not supported" +#endif + +#include <string.h> +#include <fcntl.h> +#include <unistd.h> +#include <sys/ioctl.h> +#include <linux/version.h> + +#include <rte_spinlock.h> +#include <rte_string_fns.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_log.h> +#include <rte_kni.h> +#include <rte_memzone.h> +#include <rte_tailq.h> +#include <rte_rwlock.h> +#include <rte_eal_memconfig.h> +#include <rte_kni_common.h> +#include "rte_kni_fifo.h" + +#define MAX_MBUF_BURST_NUM 32 + +/* Maximum number of ring entries */ +#define KNI_FIFO_COUNT_MAX 1024 +#define KNI_FIFO_SIZE (KNI_FIFO_COUNT_MAX * sizeof(void *) + \ + sizeof(struct rte_kni_fifo)) + +#define KNI_REQUEST_MBUF_NUM_MAX 32 + +#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0) + +#define KNI_MZ_NAME_FMT "kni_info_%s" +#define KNI_TX_Q_MZ_NAME_FMT "kni_tx_%s" +#define KNI_RX_Q_MZ_NAME_FMT "kni_rx_%s" +#define KNI_ALLOC_Q_MZ_NAME_FMT "kni_alloc_%s" +#define KNI_FREE_Q_MZ_NAME_FMT "kni_free_%s" +#define KNI_REQ_Q_MZ_NAME_FMT "kni_req_%s" +#define KNI_RESP_Q_MZ_NAME_FMT "kni_resp_%s" +#define KNI_SYNC_ADDR_MZ_NAME_FMT "kni_sync_%s" + +TAILQ_HEAD(rte_kni_list, rte_tailq_entry); + +static struct rte_tailq_elem rte_kni_tailq = { + .name = "RTE_KNI", +}; +EAL_REGISTER_TAILQ(rte_kni_tailq) + +/** + * KNI context + */ +struct rte_kni { + char name[RTE_KNI_NAMESIZE]; /**< KNI interface name */ + uint16_t group_id; /**< Group ID of KNI devices */ + uint32_t slot_id; /**< KNI pool slot ID */ + struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */ + unsigned int mbuf_size; /**< mbuf size */ + + const struct rte_memzone *m_tx_q; /**< TX queue memzone */ + const struct rte_memzone *m_rx_q; /**< RX queue memzone */ + const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */ + const struct rte_memzone *m_free_q; /**< Free queue memzone */ + + struct rte_kni_fifo *tx_q; /**< TX queue */ + struct 
rte_kni_fifo *rx_q; /**< RX queue */ + struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */ + struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */ + + const struct rte_memzone *m_req_q; /**< Request queue memzone */ + const struct rte_memzone *m_resp_q; /**< Response queue memzone */ + const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */ + + /* For request & response */ + struct rte_kni_fifo *req_q; /**< Request queue */ + struct rte_kni_fifo *resp_q; /**< Response queue */ + void *sync_addr; /**< Req/Resp Mem address */ + + struct rte_kni_ops ops; /**< operations for request */ +}; + +enum kni_ops_status { + KNI_REQ_NO_REGISTER = 0, + KNI_REQ_REGISTERED, +}; + +static void kni_free_mbufs(struct rte_kni *kni); +static void kni_allocate_mbufs(struct rte_kni *kni); + +static volatile int kni_fd = -1; + +/* Shall be called before any allocation happens */ +int +rte_kni_init(unsigned int max_kni_ifaces __rte_unused) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) + if (rte_eal_iova_mode() != RTE_IOVA_PA) { + RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n"); + return -1; + } +#endif + + /* Check FD and open */ + if (kni_fd < 0) { + kni_fd = open("/dev/" KNI_DEVICE, O_RDWR); + if (kni_fd < 0) { + RTE_LOG(ERR, KNI, + "Can not open /dev/%s\n", KNI_DEVICE); + return -1; + } + } + + return 0; +} + +static struct rte_kni * +__rte_kni_get(const char *name) +{ + struct rte_kni *kni; + struct rte_tailq_entry *te; + struct rte_kni_list *kni_list; + + kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list); + + TAILQ_FOREACH(te, kni_list, next) { + kni = te->data; + if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0) + break; + } + + if (te == NULL) + kni = NULL; + + return kni; +} + +static int +kni_reserve_mz(struct rte_kni *kni) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name); + kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name); + kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name); + kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name); + kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name); + kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name); + kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail); + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name); + kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG); + KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail); + + return 0; + +sync_addr_fail: + rte_memzone_free(kni->m_resp_q); 
+resp_q_fail: + rte_memzone_free(kni->m_req_q); +req_q_fail: + rte_memzone_free(kni->m_free_q); +free_q_fail: + rte_memzone_free(kni->m_alloc_q); +alloc_q_fail: + rte_memzone_free(kni->m_rx_q); +rx_q_fail: + rte_memzone_free(kni->m_tx_q); +tx_q_fail: + return -1; +} + +static void +kni_release_mz(struct rte_kni *kni) +{ + rte_memzone_free(kni->m_tx_q); + rte_memzone_free(kni->m_rx_q); + rte_memzone_free(kni->m_alloc_q); + rte_memzone_free(kni->m_free_q); + rte_memzone_free(kni->m_req_q); + rte_memzone_free(kni->m_resp_q); + rte_memzone_free(kni->m_sync_addr); +} + +struct rte_kni * +rte_kni_alloc(struct rte_mempool *pktmbuf_pool, + const struct rte_kni_conf *conf, + struct rte_kni_ops *ops) +{ + int ret; + struct rte_kni_device_info dev_info; + struct rte_kni *kni; + struct rte_tailq_entry *te; + struct rte_kni_list *kni_list; + + if (!pktmbuf_pool || !conf || !conf->name[0]) + return NULL; + + /* Check if KNI subsystem has been initialized */ + if (kni_fd < 0) { + RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n"); + return NULL; + } + + rte_mcfg_tailq_write_lock(); + + kni = __rte_kni_get(conf->name); + if (kni != NULL) { + RTE_LOG(ERR, KNI, "KNI already exists\n"); + goto unlock; + } + + te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n"); + goto unlock; + } + + kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE); + if (kni == NULL) { + RTE_LOG(ERR, KNI, "KNI memory allocation failed\n"); + goto kni_fail; + } + + strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE); + + if (ops) + memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops)); + else + kni->ops.port_id = UINT16_MAX; + + memset(&dev_info, 0, sizeof(dev_info)); + dev_info.core_id = conf->core_id; + dev_info.force_bind = conf->force_bind; + dev_info.group_id = conf->group_id; + dev_info.mbuf_size = conf->mbuf_size; + dev_info.mtu = conf->mtu; + dev_info.min_mtu = conf->min_mtu; + dev_info.max_mtu = conf->max_mtu; + + memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN); + + strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE); + + ret = kni_reserve_mz(kni); + if (ret < 0) + goto mz_fail; + + /* TX RING */ + kni->tx_q = kni->m_tx_q->addr; + kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX); + dev_info.tx_phys = kni->m_tx_q->phys_addr; + + /* RX RING */ + kni->rx_q = kni->m_rx_q->addr; + kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX); + dev_info.rx_phys = kni->m_rx_q->phys_addr; + + /* ALLOC RING */ + kni->alloc_q = kni->m_alloc_q->addr; + kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX); + dev_info.alloc_phys = kni->m_alloc_q->phys_addr; + + /* FREE RING */ + kni->free_q = kni->m_free_q->addr; + kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX); + dev_info.free_phys = kni->m_free_q->phys_addr; + + /* Request RING */ + kni->req_q = kni->m_req_q->addr; + kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX); + dev_info.req_phys = kni->m_req_q->phys_addr; + + /* Response RING */ + kni->resp_q = kni->m_resp_q->addr; + kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX); + dev_info.resp_phys = kni->m_resp_q->phys_addr; + + /* Req/Resp sync mem area */ + kni->sync_addr = kni->m_sync_addr->addr; + dev_info.sync_va = kni->m_sync_addr->addr; + dev_info.sync_phys = kni->m_sync_addr->phys_addr; + + kni->pktmbuf_pool = pktmbuf_pool; + kni->group_id = conf->group_id; + kni->mbuf_size = conf->mbuf_size; + + dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 
1 : 0; + + ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info); + if (ret < 0) + goto ioctl_fail; + + te->data = kni; + + kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list); + TAILQ_INSERT_TAIL(kni_list, te, next); + + rte_mcfg_tailq_write_unlock(); + + /* Allocate mbufs and then put them into alloc_q */ + kni_allocate_mbufs(kni); + + return kni; + +ioctl_fail: + kni_release_mz(kni); +mz_fail: + rte_free(kni); +kni_fail: + rte_free(te); +unlock: + rte_mcfg_tailq_write_unlock(); + + return NULL; +} + +static void +kni_free_fifo(struct rte_kni_fifo *fifo) +{ + int ret; + struct rte_mbuf *pkt; + + do { + ret = kni_fifo_get(fifo, (void **)&pkt, 1); + if (ret) + rte_pktmbuf_free(pkt); + } while (ret); +} + +static void * +va2pa(struct rte_mbuf *m) +{ + return (void *)((unsigned long)m - + ((unsigned long)m->buf_addr - + (unsigned long)m->buf_iova)); +} + +static void * +va2pa_all(struct rte_mbuf *mbuf) +{ + void *phy_mbuf = va2pa(mbuf); + struct rte_mbuf *next = mbuf->next; + while (next) { + mbuf->next = va2pa(next); + mbuf = next; + next = mbuf->next; + } + return phy_mbuf; +} + +static void +obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj, + unsigned obj_idx __rte_unused) +{ + struct rte_mbuf *m = obj; + void *mbuf_phys = opaque; + + if (va2pa(m) == mbuf_phys) + rte_pktmbuf_free(m); +} + +static void +kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo) +{ + void *mbuf_phys; + int ret; + + do { + ret = kni_fifo_get(fifo, &mbuf_phys, 1); + if (ret) + rte_mempool_obj_iter(mp, obj_free, mbuf_phys); + } while (ret); +} + +int +rte_kni_release(struct rte_kni *kni) +{ + struct rte_tailq_entry *te; + struct rte_kni_list *kni_list; + struct rte_kni_device_info dev_info; + uint32_t retry = 5; + + if (!kni) + return -1; + + kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list); + + rte_mcfg_tailq_write_lock(); + + TAILQ_FOREACH(te, kni_list, next) { + if (te->data == kni) + break; + } + + if (te == NULL) + goto unlock; + + strlcpy(dev_info.name, kni->name, sizeof(dev_info.name)); + if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) { + RTE_LOG(ERR, KNI, "Fail to release kni device\n"); + goto unlock; + } + + TAILQ_REMOVE(kni_list, te, next); + + rte_mcfg_tailq_write_unlock(); + + /* mbufs in all fifo should be released, except request/response */ + + /* wait until all rxq packets processed by kernel */ + while (kni_fifo_count(kni->rx_q) && retry--) + usleep(1000); + + if (kni_fifo_count(kni->rx_q)) + RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n"); + + kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q); + kni_free_fifo(kni->tx_q); + kni_free_fifo(kni->free_q); + + kni_release_mz(kni); + + rte_free(kni); + + rte_free(te); + + return 0; + +unlock: + rte_mcfg_tailq_write_unlock(); + + return -1; +} + +/* default callback for request of configuring device mac address */ +static int +kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[]) +{ + int ret = 0; + + if (!rte_eth_dev_is_valid_port(port_id)) { + RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id); + return -EINVAL; + } + + RTE_LOG(INFO, KNI, "Configure mac address of %d", port_id); + + ret = rte_eth_dev_default_mac_addr_set(port_id, + (struct rte_ether_addr *)mac_addr); + if (ret < 0) + RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n", + port_id); + + return ret; +} + +/* default callback for request of configuring promiscuous mode */ +static int +kni_config_promiscusity(uint16_t port_id, uint8_t to_on) +{ + int ret; + + if (!rte_eth_dev_is_valid_port(port_id)) { + 
RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id); + return -EINVAL; + } + + RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n", + port_id, to_on); + + if (to_on) + ret = rte_eth_promiscuous_enable(port_id); + else + ret = rte_eth_promiscuous_disable(port_id); + + if (ret != 0) + RTE_LOG(ERR, KNI, + "Failed to %s promiscuous mode for port %u: %s\n", + to_on ? "enable" : "disable", port_id, + rte_strerror(-ret)); + + return ret; +} + +/* default callback for request of configuring allmulticast mode */ +static int +kni_config_allmulticast(uint16_t port_id, uint8_t to_on) +{ + if (!rte_eth_dev_is_valid_port(port_id)) { + RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id); + return -EINVAL; + } + + RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n", + port_id, to_on); + + if (to_on) + rte_eth_allmulticast_enable(port_id); + else + rte_eth_allmulticast_disable(port_id); + + return 0; +} + +int +rte_kni_handle_request(struct rte_kni *kni) +{ + unsigned int ret; + struct rte_kni_request *req = NULL; + + if (kni == NULL) + return -1; + + /* Get request mbuf */ + ret = kni_fifo_get(kni->req_q, (void **)&req, 1); + if (ret != 1) + return 0; /* It is OK of can not getting the request mbuf */ + + if (req != kni->sync_addr) { + RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req); + return -1; + } + + /* Analyze the request and call the relevant actions for it */ + switch (req->req_id) { + case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */ + if (kni->ops.change_mtu) + req->result = kni->ops.change_mtu(kni->ops.port_id, + req->new_mtu); + break; + case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */ + if (kni->ops.config_network_if) + req->result = kni->ops.config_network_if(kni->ops.port_id, + req->if_up); + break; + case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */ + if (kni->ops.config_mac_address) + req->result = kni->ops.config_mac_address( + kni->ops.port_id, req->mac_addr); + else if (kni->ops.port_id != UINT16_MAX) + req->result = kni_config_mac_address( + kni->ops.port_id, req->mac_addr); + break; + case RTE_KNI_REQ_CHANGE_PROMISC: /* Change PROMISCUOUS MODE */ + if (kni->ops.config_promiscusity) + req->result = kni->ops.config_promiscusity( + kni->ops.port_id, req->promiscusity); + else if (kni->ops.port_id != UINT16_MAX) + req->result = kni_config_promiscusity( + kni->ops.port_id, req->promiscusity); + break; + case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */ + if (kni->ops.config_allmulticast) + req->result = kni->ops.config_allmulticast( + kni->ops.port_id, req->allmulti); + else if (kni->ops.port_id != UINT16_MAX) + req->result = kni_config_allmulticast( + kni->ops.port_id, req->allmulti); + break; + default: + RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id); + req->result = -EINVAL; + break; + } + + /* Construct response mbuf and put it back to resp_q */ + ret = kni_fifo_put(kni->resp_q, (void **)&req, 1); + if (ret != 1) { + RTE_LOG(ERR, KNI, "Fail to put the muf back to resp_q\n"); + return -1; /* It is an error of can't putting the mbuf back */ + } + + return 0; +} + +unsigned +rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) +{ + num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num); + void *phy_mbufs[num]; + unsigned int ret; + unsigned int i; + + for (i = 0; i < num; i++) + phy_mbufs[i] = va2pa_all(mbufs[i]); + + ret = kni_fifo_put(kni->rx_q, phy_mbufs, num); + + /* Get mbufs from free_q and then free them */ + kni_free_mbufs(kni); + + return ret; +} + +unsigned +rte_kni_rx_burst(struct 
rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) +{ + unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); + + /* If buffers removed, allocate mbufs and then put them into alloc_q */ + if (ret) + kni_allocate_mbufs(kni); + + return ret; +} + +static void +kni_free_mbufs(struct rte_kni *kni) +{ + int i, ret; + struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM]; + + ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM); + if (likely(ret > 0)) { + for (i = 0; i < ret; i++) + rte_pktmbuf_free(pkts[i]); + } +} + +static void +kni_allocate_mbufs(struct rte_kni *kni) +{ + int i, ret; + struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM]; + void *phys[MAX_MBUF_BURST_NUM]; + int allocq_free; + + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) != + offsetof(struct rte_kni_mbuf, pool)); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) != + offsetof(struct rte_kni_mbuf, buf_addr)); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) != + offsetof(struct rte_kni_mbuf, next)); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) != + offsetof(struct rte_kni_mbuf, data_off)); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_kni_mbuf, data_len)); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_kni_mbuf, pkt_len)); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_kni_mbuf, ol_flags)); + + /* Check if pktmbuf pool has been configured */ + if (kni->pktmbuf_pool == NULL) { + RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n"); + return; + } + + allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) + & (MAX_MBUF_BURST_NUM - 1); + for (i = 0; i < allocq_free; i++) { + pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool); + if (unlikely(pkts[i] == NULL)) { + /* Out of memory */ + RTE_LOG(ERR, KNI, "Out of memory\n"); + break; + } + phys[i] = va2pa(pkts[i]); + } + + /* No pkt mbuf allocated */ + if (i <= 0) + return; + + ret = kni_fifo_put(kni->alloc_q, phys, i); + + /* Check if any mbufs not put into alloc_q, and then free them */ + if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) { + int j; + + for (j = ret; j < i; j++) + rte_pktmbuf_free(pkts[j]); + } +} + +struct rte_kni * +rte_kni_get(const char *name) +{ + struct rte_kni *kni; + + if (name == NULL || name[0] == '\0') + return NULL; + + rte_mcfg_tailq_read_lock(); + + kni = __rte_kni_get(name); + + rte_mcfg_tailq_read_unlock(); + + return kni; +} + +const char * +rte_kni_get_name(const struct rte_kni *kni) +{ + return kni->name; +} + +static enum kni_ops_status +kni_check_request_register(struct rte_kni_ops *ops) +{ + /* check if KNI request ops has been registered*/ + if (ops == NULL) + return KNI_REQ_NO_REGISTER; + + if (ops->change_mtu == NULL + && ops->config_network_if == NULL + && ops->config_mac_address == NULL + && ops->config_promiscusity == NULL + && ops->config_allmulticast == NULL) + return KNI_REQ_NO_REGISTER; + + return KNI_REQ_REGISTERED; +} + +int +rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops) +{ + enum kni_ops_status req_status; + + if (ops == NULL) { + RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n"); + return -1; + } + + if (kni == NULL) { + RTE_LOG(ERR, KNI, "Invalid kni info.\n"); + return -1; + } + + req_status = kni_check_request_register(&kni->ops); + if (req_status == KNI_REQ_REGISTERED) { + RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n"); + return -1; + } + + memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops)); + return 0; +} + +int 
+rte_kni_unregister_handlers(struct rte_kni *kni) +{ + if (kni == NULL) { + RTE_LOG(ERR, KNI, "Invalid kni info.\n"); + return -1; + } + + memset(&kni->ops, 0, sizeof(struct rte_kni_ops)); + + return 0; +} + +int +rte_kni_update_link(struct rte_kni *kni, unsigned int linkup) +{ + char path[64]; + char old_carrier[2]; + const char *new_carrier; + int old_linkup; + int fd, ret; + + if (kni == NULL) + return -1; + + snprintf(path, sizeof(path), "/sys/devices/virtual/net/%s/carrier", + kni->name); + + fd = open(path, O_RDWR); + if (fd == -1) { + RTE_LOG(ERR, KNI, "Failed to open file: %s.\n", path); + return -1; + } + + ret = read(fd, old_carrier, 2); + if (ret < 1) { + close(fd); + return -1; + } + old_linkup = (old_carrier[0] == '1'); + + new_carrier = linkup ? "1" : "0"; + ret = write(fd, new_carrier, 1); + if (ret < 1) { + RTE_LOG(ERR, KNI, "Failed to write file: %s.\n", path); + close(fd); + return -1; + } + + close(fd); + return old_linkup; +} + +void +rte_kni_close(void) +{ + if (kni_fd < 0) + return; + + close(kni_fd); + kni_fd = -1; +} diff --git a/src/spdk/dpdk/lib/librte_kni/rte_kni.h b/src/spdk/dpdk/lib/librte_kni/rte_kni.h new file mode 100644 index 000000000..f1bb782c6 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_kni/rte_kni.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_KNI_H_ +#define _RTE_KNI_H_ + +/** + * @file + * RTE KNI + * + * The KNI library provides the ability to create and destroy kernel NIC + * interfaces that may be used by the RTE application to receive/transmit + * packets from/to Linux kernel net interfaces. + * + * This library provides two APIs to burst receive packets from KNI interfaces, + * and burst transmit packets to KNI interfaces. + */ + +#include <rte_pci.h> +#include <rte_memory.h> +#include <rte_mempool.h> +#include <rte_ether.h> + +#include <rte_kni_common.h> + +#ifdef __cplusplus +extern "C" { +#endif + +struct rte_kni; +struct rte_mbuf; + +/** + * Structure which has the function pointers for KNI interface. + */ +struct rte_kni_ops { + uint16_t port_id; /* Port ID */ + + /* Pointer to function of changing MTU */ + int (*change_mtu)(uint16_t port_id, unsigned int new_mtu); + + /* Pointer to function of configuring network interface */ + int (*config_network_if)(uint16_t port_id, uint8_t if_up); + + /* Pointer to function of configuring mac address */ + int (*config_mac_address)(uint16_t port_id, uint8_t mac_addr[]); + + /* Pointer to function of configuring promiscuous mode */ + int (*config_promiscusity)(uint16_t port_id, uint8_t to_on); + + /* Pointer to function of configuring allmulticast mode */ + int (*config_allmulticast)(uint16_t port_id, uint8_t to_on); +}; + +/** + * Structure for configuring KNI device. + */ +struct rte_kni_conf { + /* + * KNI name which will be used in relevant network device. + * Let the name as short as possible, as it will be part of + * memzone name. 
+ */ + char name[RTE_KNI_NAMESIZE]; + uint32_t core_id; /* Core ID to bind kernel thread on */ + uint16_t group_id; /* Group ID */ + unsigned mbuf_size; /* mbuf size */ + struct rte_pci_addr addr; /* depreciated */ + struct rte_pci_id id; /* depreciated */ + + __extension__ + uint8_t force_bind : 1; /* Flag to bind kernel thread */ + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; /* MAC address assigned to KNI */ + uint16_t mtu; + uint16_t min_mtu; + uint16_t max_mtu; +}; + +/** + * Initialize and preallocate KNI subsystem + * + * This function is to be executed on the MASTER lcore only, after EAL + * initialization and before any KNI interface is attempted to be + * allocated + * + * @param max_kni_ifaces + * The maximum number of KNI interfaces that can coexist concurrently + * + * @return + * - 0 indicates success. + * - negative value indicates failure. + */ +int rte_kni_init(unsigned int max_kni_ifaces); + + +/** + * Allocate KNI interface according to the port id, mbuf size, mbuf pool, + * configurations and callbacks for kernel requests.The KNI interface created + * in the kernel space is the net interface the traditional Linux application + * talking to. + * + * The rte_kni_alloc shall not be called before rte_kni_init() has been + * called. rte_kni_alloc is thread safe. + * + * The mempool should have capacity of more than "2 x KNI_FIFO_COUNT_MAX" + * elements for each KNI interface allocated. + * + * @param pktmbuf_pool + * The mempool for allocating mbufs for packets. + * @param conf + * The pointer to the configurations of the KNI device. + * @param ops + * The pointer to the callbacks for the KNI kernel requests. + * + * @return + * - The pointer to the context of a KNI interface. + * - NULL indicate error. + */ +struct rte_kni *rte_kni_alloc(struct rte_mempool *pktmbuf_pool, + const struct rte_kni_conf *conf, struct rte_kni_ops *ops); + +/** + * Release KNI interface according to the context. It will also release the + * paired KNI interface in kernel space. All processing on the specific KNI + * context need to be stopped before calling this interface. + * + * rte_kni_release is thread safe. + * + * @param kni + * The pointer to the context of an existent KNI interface. + * + * @return + * - 0 indicates success. + * - negative value indicates failure. + */ +int rte_kni_release(struct rte_kni *kni); + +/** + * It is used to handle the request mbufs sent from kernel space. + * Then analyzes it and calls the specific actions for the specific requests. + * Finally constructs the response mbuf and puts it back to the resp_q. + * + * @param kni + * The pointer to the context of an existent KNI interface. + * + * @return + * - 0 + * - negative value indicates failure. + */ +int rte_kni_handle_request(struct rte_kni *kni); + +/** + * Retrieve a burst of packets from a KNI interface. The retrieved packets are + * stored in rte_mbuf structures whose pointers are supplied in the array of + * mbufs, and the maximum number is indicated by num. It handles allocating + * the mbufs for KNI interface alloc queue. + * + * @param kni + * The KNI interface context. + * @param mbufs + * The array to store the pointers of mbufs. + * @param num + * The maximum number per burst. + * + * @return + * The actual number of packets retrieved. + */ +unsigned rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, + unsigned num); + +/** + * Send a burst of packets to a KNI interface. 
The packets to be sent out are + * stored in rte_mbuf structures whose pointers are supplied in the array of + * mbufs, and the maximum number is indicated by num. It handles the freeing of + * the mbufs in the free queue of KNI interface. + * + * @param kni + * The KNI interface context. + * @param mbufs + * The array to store the pointers of mbufs. + * @param num + * The maximum number per burst. + * + * @return + * The actual number of packets sent. + */ +unsigned rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, + unsigned num); + +/** + * Get the KNI context of its name. + * + * @param name + * pointer to the KNI device name. + * + * @return + * On success: Pointer to KNI interface. + * On failure: NULL. + */ +struct rte_kni *rte_kni_get(const char *name); + +/** + * Get the name given to a KNI device + * + * @param kni + * The KNI instance to query + * @return + * The pointer to the KNI name + */ +const char *rte_kni_get_name(const struct rte_kni *kni); + +/** + * Register KNI request handling for a specified port,and it can + * be called by master process or slave process. + * + * @param kni + * pointer to struct rte_kni. + * @param ops + * pointer to struct rte_kni_ops. + * + * @return + * On success: 0 + * On failure: -1 + */ +int rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops); + +/** + * Unregister KNI request handling for a specified port. + * + * @param kni + * pointer to struct rte_kni. + * + * @return + * On success: 0 + * On failure: -1 + */ +int rte_kni_unregister_handlers(struct rte_kni *kni); + +/** + * Update link carrier state for KNI port. + * + * Update the linkup/linkdown state of a KNI interface in the kernel. + * + * @param kni + * pointer to struct rte_kni. + * @param linkup + * New link state: + * 0 for linkdown. + * > 0 for linkup. + * + * @return + * On failure: -1 + * Previous link state == linkdown: 0 + * Previous link state == linkup: 1 + */ +__rte_experimental +int +rte_kni_update_link(struct rte_kni *kni, unsigned int linkup); + +/** + * Close KNI device. + */ +void rte_kni_close(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_KNI_H_ */ diff --git a/src/spdk/dpdk/lib/librte_kni/rte_kni_fifo.h b/src/spdk/dpdk/lib/librte_kni/rte_kni_fifo.h new file mode 100644 index 000000000..d2ec82fe8 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_kni/rte_kni_fifo.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + + + +/** + * @internal when c11 memory model enabled use c11 atomic memory barrier. + * when under non c11 memory model use rte_smp_* memory barrier. + * + * @param src + * Pointer to the source data. + * @param dst + * Pointer to the destination data. + * @param value + * Data value. 
+ */ +#ifdef RTE_USE_C11_MEM_MODEL +#define __KNI_LOAD_ACQUIRE(src) ({ \ + __atomic_load_n((src), __ATOMIC_ACQUIRE); \ + }) +#define __KNI_STORE_RELEASE(dst, value) do { \ + __atomic_store_n((dst), value, __ATOMIC_RELEASE); \ + } while(0) +#else +#define __KNI_LOAD_ACQUIRE(src) ({ \ + typeof (*(src)) val = *(src); \ + rte_smp_rmb(); \ + val; \ + }) +#define __KNI_STORE_RELEASE(dst, value) do { \ + *(dst) = value; \ + rte_smp_wmb(); \ + } while(0) +#endif + +/** + * Initializes the kni fifo structure + */ +static void +kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size) +{ + /* Ensure size is power of 2 */ + if (size & (size - 1)) + rte_panic("KNI fifo size must be power of 2\n"); + + fifo->write = 0; + fifo->read = 0; + fifo->len = size; + fifo->elem_size = sizeof(void *); +} + +/** + * Adds num elements into the fifo. Return the number actually written + */ +static inline unsigned +kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num) +{ + unsigned i = 0; + unsigned fifo_write = fifo->write; + unsigned new_write = fifo_write; + unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read); + + for (i = 0; i < num; i++) { + new_write = (new_write + 1) & (fifo->len - 1); + + if (new_write == fifo_read) + break; + fifo->buffer[fifo_write] = data[i]; + fifo_write = new_write; + } + __KNI_STORE_RELEASE(&fifo->write, fifo_write); + return i; +} + +/** + * Get up to num elements from the fifo. Return the number actually read + */ +static inline unsigned +kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num) +{ + unsigned i = 0; + unsigned new_read = fifo->read; + unsigned fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write); + + for (i = 0; i < num; i++) { + if (new_read == fifo_write) + break; + + data[i] = fifo->buffer[new_read]; + new_read = (new_read + 1) & (fifo->len - 1); + } + __KNI_STORE_RELEASE(&fifo->read, new_read); + return i; +} + +/** + * Get the num of elements in the fifo + */ +static inline uint32_t +kni_fifo_count(struct rte_kni_fifo *fifo) +{ + unsigned fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write); + unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read); + return (fifo->len + fifo_write - fifo_read) & (fifo->len - 1); +} + +/** + * Get the num of available elements in the fifo + */ +static inline uint32_t +kni_fifo_free_count(struct rte_kni_fifo *fifo) +{ + uint32_t fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write); + uint32_t fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read); + return (fifo_read - fifo_write - 1) & (fifo->len - 1); +} diff --git a/src/spdk/dpdk/lib/librte_kni/rte_kni_version.map b/src/spdk/dpdk/lib/librte_kni/rte_kni_version.map new file mode 100644 index 000000000..9cd3cedc5 --- /dev/null +++ b/src/spdk/dpdk/lib/librte_kni/rte_kni_version.map @@ -0,0 +1,23 @@ +DPDK_20.0 { + global: + + rte_kni_alloc; + rte_kni_close; + rte_kni_get; + rte_kni_get_name; + rte_kni_handle_request; + rte_kni_init; + rte_kni_register_handlers; + rte_kni_release; + rte_kni_rx_burst; + rte_kni_tx_burst; + rte_kni_unregister_handlers; + + local: *; +}; + +EXPERIMENTAL { + global: + + rte_kni_update_link; +}; |
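For orientation, below is a minimal usage sketch of the public API added by this diff (rte_kni_init, rte_kni_alloc, rte_kni_rx_burst, rte_kni_tx_burst, rte_kni_handle_request, rte_kni_release). It is not part of the commit: the helper name kni_echo_loop, the interface name "vEth0", the MTU value and the force_quit flag are illustrative, and it assumes EAL is already initialized, the rte_kni kernel module is loaded, and the caller passes a pktmbuf mempool sized above the "2 x KNI_FIFO_COUNT_MAX" (2048) minimum stated in rte_kni.h.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_kni.h>

#define PKT_BURST_SZ 32                 /* matches MAX_MBUF_BURST_NUM in rte_kni.c */

static volatile bool force_quit;        /* placeholder; set from a signal handler elsewhere */

/* Echo every packet the kernel stack writes to the KNI net device straight back to it. */
static int
kni_echo_loop(struct rte_mempool *pool)
{
	struct rte_kni_conf conf;
	struct rte_kni *kni;
	struct rte_mbuf *pkts[PKT_BURST_SZ];
	unsigned int nb_rx, nb_tx, i;

	/* One interface is enough for this sketch. */
	if (rte_kni_init(1) < 0)
		return -1;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth0");
	conf.mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
	conf.mtu = 1500;                /* illustrative; real apps derive this from the port */

	/* No rte_kni_ops registered: MTU/MAC/link requests are left unhandled. */
	kni = rte_kni_alloc(pool, &conf, NULL);
	if (kni == NULL)
		return -1;

	while (!force_quit) {
		/* Drain tx_q: packets the kernel sent to the vEth device. */
		nb_rx = rte_kni_rx_burst(kni, pkts, PKT_BURST_SZ);

		/* Push them back through rx_q; free whatever did not fit. */
		nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);

		/* Service change-MTU/if-up/MAC requests queued by the kernel module. */
		rte_kni_handle_request(kni);
	}

	return rte_kni_release(kni);
}

Note that the queue names in rte_kni.c are oriented from the kernel's point of view: rte_kni_rx_burst() pulls from the interface's tx_q and then refills alloc_q, while rte_kni_tx_burst() converts mbuf pointers to physical addresses and writes them into rx_q, draining free_q afterwards.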