author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/raw/octeontx2_ep
parent     Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/raw/octeontx2_ep')
10 files changed, 2470 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile b/src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile new file mode 100644 index 000000000..1a54bf56f --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(C) 2019 Marvell International Ltd. +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name +LIB = librte_rawdev_octeontx2_ep.a + +# Build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2/ +CFLAGS += -I$(RTE_SDK)/drivers/raw/octeontx2_ep/ + +LDLIBS += -lrte_eal +LDLIBS += -lrte_rawdev +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_mempool +LDLIBS += -lrte_common_octeontx2 + +ifneq ($(CONFIG_RTE_ARCH_64),y) +CFLAGS += -Wno-int-to-pointer-cast +CFLAGS += -Wno-pointer-to-int-cast +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS += -diag-disable 2259 +endif +endif + +EXPORT_MAP := rte_rawdev_octeontx2_ep_version.map + +# +# All source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_rawdev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_enqdeq.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_test.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_vf.c + + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build b/src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build new file mode 100644 index 000000000..0e6338f76 --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(C) 2019 Marvell International Ltd. +# + +deps += ['bus_pci', 'common_octeontx2', 'rawdev'] +sources = files('otx2_ep_rawdev.c', + 'otx2_ep_enqdeq.c', + 'otx2_ep_test.c', + 'otx2_ep_vf.c') diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c new file mode 100644 index 000000000..9f1e5eda6 --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c @@ -0,0 +1,846 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include <string.h> +#include <unistd.h> +#include <dirent.h> +#include <fcntl.h> + +#include <rte_bus.h> +#include <rte_bus_pci.h> +#include <rte_eal.h> +#include <rte_lcore.h> +#include <rte_mempool.h> +#include <rte_pci.h> + +#include <rte_common.h> +#include <rte_rawdev.h> +#include <rte_rawdev_pmd.h> + +#include "otx2_common.h" +#include "otx2_ep_enqdeq.h" + +static void +sdp_dmazone_free(const struct rte_memzone *mz) +{ + const struct rte_memzone *mz_tmp; + int ret = 0; + + if (mz == NULL) { + otx2_err("Memzone %s : NULL", mz->name); + return; + } + + mz_tmp = rte_memzone_lookup(mz->name); + if (mz_tmp == NULL) { + otx2_err("Memzone %s Not Found", mz->name); + return; + } + + ret = rte_memzone_free(mz); + if (ret) + otx2_err("Memzone free failed : ret = %d", ret); + +} + +/* Free IQ resources */ +int +sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no) +{ + struct sdp_instr_queue *iq; + + iq = sdpvf->instr_queue[iq_no]; + if (iq == NULL) { + otx2_err("Invalid IQ[%d]\n", iq_no); + return -ENOMEM; + } + + rte_free(iq->req_list); + iq->req_list = NULL; + + if (iq->iq_mz) { + sdp_dmazone_free(iq->iq_mz); + iq->iq_mz = NULL; + } + + rte_free(sdpvf->instr_queue[iq_no]); + sdpvf->instr_queue[iq_no] = NULL; + + sdpvf->num_iqs--; + + otx2_info("IQ[%d] is deleted", iq_no); + + return 0; +} + +/* IQ initialization */ +static int +sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no) +{ + const struct sdp_config *conf; + struct sdp_instr_queue *iq; + uint32_t q_size; + + conf = sdpvf->conf; + iq = sdpvf->instr_queue[iq_no]; + q_size = conf->iq.instr_type * conf->num_iqdef_descs; + + /* IQ memory creation for Instruction submission to OCTEON TX2 */ + iq->iq_mz = rte_memzone_reserve_aligned("iqmz", + q_size, + rte_socket_id(), + RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); + if (iq->iq_mz == NULL) { + otx2_err("IQ[%d] memzone alloc failed", iq_no); + goto iq_init_fail; + } + + iq->base_addr_dma = iq->iq_mz->iova; + iq->base_addr = (uint8_t *)iq->iq_mz->addr; + + if (conf->num_iqdef_descs & (conf->num_iqdef_descs - 1)) { + otx2_err("IQ[%d] descs not in power of 2", iq_no); + goto iq_init_fail; + } + + iq->nb_desc = conf->num_iqdef_descs; + + /* Create a IQ request list to hold requests that have been + * posted to OCTEON TX2. This list will be used for freeing the IQ + * data buffer(s) later once the OCTEON TX2 fetched the requests. 
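An aside on the descriptor-count check a few lines above: n & (n - 1) clears the lowest set bit of n, so the expression is zero exactly when n has at most one bit set. A self-contained illustration (not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Non-zero power of two <=> exactly one bit set. The driver tests the
 * negation, (n & (n - 1)) != 0, to reject ring sizes it cannot use. */
static int is_pow2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n", is_pow2(1024), is_pow2(1000), is_pow2(1));
	/* prints: 1 0 1 */
	return 0;
}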
+ */ + iq->req_list = rte_zmalloc_socket("request_list", + (iq->nb_desc * SDP_IQREQ_LIST_SIZE), + RTE_CACHE_LINE_SIZE, + rte_socket_id()); + if (iq->req_list == NULL) { + otx2_err("IQ[%d] req_list alloc failed", iq_no); + goto iq_init_fail; + } + + otx2_info("IQ[%d]: base: %p basedma: %lx count: %d", + iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma, + iq->nb_desc); + + iq->sdp_dev = sdpvf; + iq->q_no = iq_no; + iq->fill_cnt = 0; + iq->host_write_index = 0; + iq->otx_read_index = 0; + iq->flush_index = 0; + + /* Initialize the spinlock for this instruction queue */ + rte_spinlock_init(&iq->lock); + rte_spinlock_init(&iq->post_lock); + + rte_atomic64_clear(&iq->iq_flush_running); + + sdpvf->io_qmask.iq |= (1ull << iq_no); + + /* Set 32B/64B mode for each input queue */ + if (conf->iq.instr_type == 64) + sdpvf->io_qmask.iq64B |= (1ull << iq_no); + + iq->iqcmd_64B = (conf->iq.instr_type == 64); + + /* Set up IQ registers */ + sdpvf->fn_list.setup_iq_regs(sdpvf, iq_no); + + return 0; + +iq_init_fail: + return -ENOMEM; + +} + +int +sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no) +{ + struct sdp_instr_queue *iq; + + iq = (struct sdp_instr_queue *)rte_zmalloc("sdp_IQ", sizeof(*iq), + RTE_CACHE_LINE_SIZE); + if (iq == NULL) + return -ENOMEM; + + sdpvf->instr_queue[iq_no] = iq; + + if (sdp_init_instr_queue(sdpvf, iq_no)) { + otx2_err("IQ init is failed"); + goto delete_IQ; + } + otx2_info("IQ[%d] is created.", sdpvf->num_iqs); + + sdpvf->num_iqs++; + + + return 0; + +delete_IQ: + sdp_delete_iqs(sdpvf, iq_no); + return -ENOMEM; +} + +static void +sdp_droq_reset_indices(struct sdp_droq *droq) +{ + droq->read_idx = 0; + droq->write_idx = 0; + droq->refill_idx = 0; + droq->refill_count = 0; + rte_atomic64_set(&droq->pkts_pending, 0); +} + +static void +sdp_droq_destroy_ring_buffers(struct sdp_device *sdpvf, + struct sdp_droq *droq) +{ + uint32_t idx; + + for (idx = 0; idx < droq->nb_desc; idx++) { + if (droq->recv_buf_list[idx].buffer) { + rte_mempool_put(sdpvf->enqdeq_mpool, + droq->recv_buf_list[idx].buffer); + + droq->recv_buf_list[idx].buffer = NULL; + } + } + + sdp_droq_reset_indices(droq); +} + +/* Free OQs resources */ +int +sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no) +{ + struct sdp_droq *droq; + + droq = sdpvf->droq[oq_no]; + if (droq == NULL) { + otx2_err("Invalid droq[%d]", oq_no); + return -ENOMEM; + } + + sdp_droq_destroy_ring_buffers(sdpvf, droq); + rte_free(droq->recv_buf_list); + droq->recv_buf_list = NULL; + + if (droq->info_mz) { + sdp_dmazone_free(droq->info_mz); + droq->info_mz = NULL; + } + + if (droq->desc_ring_mz) { + sdp_dmazone_free(droq->desc_ring_mz); + droq->desc_ring_mz = NULL; + } + + memset(droq, 0, SDP_DROQ_SIZE); + + rte_free(sdpvf->droq[oq_no]); + sdpvf->droq[oq_no] = NULL; + + sdpvf->num_oqs--; + + otx2_info("OQ[%d] is deleted", oq_no); + return 0; +} + +static int +sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf, + struct sdp_droq *droq) +{ + struct sdp_droq_desc *desc_ring = droq->desc_ring; + uint32_t idx; + void *buf; + + for (idx = 0; idx < droq->nb_desc; idx++) { + if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) || + (buf == NULL)) { + otx2_err("OQ buffer alloc failed"); + droq->stats.rx_alloc_failure++; + /* sdp_droq_destroy_ring_buffers(droq);*/ + return -ENOMEM; + } + + droq->recv_buf_list[idx].buffer = buf; + droq->info_list[idx].length = 0; + + /* Map ring buffers into memory */ + desc_ring[idx].info_ptr = (uint64_t)(droq->info_list_dma + + (idx * SDP_DROQ_INFO_SIZE)); + + desc_ring[idx].buffer_ptr = rte_mem_virt2iova(buf); + 
} + + sdp_droq_reset_indices(droq); + + return 0; +} + +static void * +sdp_alloc_info_buffer(struct sdp_device *sdpvf __rte_unused, + struct sdp_droq *droq) +{ + droq->info_mz = rte_memzone_reserve_aligned("OQ_info_list", + (droq->nb_desc * SDP_DROQ_INFO_SIZE), + rte_socket_id(), + RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); + + if (droq->info_mz == NULL) + return NULL; + + droq->info_list_dma = droq->info_mz->iova; + droq->info_alloc_size = droq->info_mz->len; + droq->info_base_addr = (size_t)droq->info_mz->addr; + + return droq->info_mz->addr; +} + +/* OQ initialization */ +static int +sdp_init_droq(struct sdp_device *sdpvf, uint32_t q_no) +{ + const struct sdp_config *conf = sdpvf->conf; + uint32_t c_refill_threshold; + uint32_t desc_ring_size; + struct sdp_droq *droq; + + otx2_info("OQ[%d] Init start", q_no); + + droq = sdpvf->droq[q_no]; + droq->sdp_dev = sdpvf; + droq->q_no = q_no; + + c_refill_threshold = conf->oq.refill_threshold; + droq->nb_desc = conf->num_oqdef_descs; + droq->buffer_size = conf->oqdef_buf_size; + + /* OQ desc_ring set up */ + desc_ring_size = droq->nb_desc * SDP_DROQ_DESC_SIZE; + droq->desc_ring_mz = rte_memzone_reserve_aligned("sdp_oqmz", + desc_ring_size, + rte_socket_id(), + RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); + + if (droq->desc_ring_mz == NULL) { + otx2_err("OQ:%d desc_ring allocation failed", q_no); + goto init_droq_fail; + } + + droq->desc_ring_dma = droq->desc_ring_mz->iova; + droq->desc_ring = (struct sdp_droq_desc *)droq->desc_ring_mz->addr; + + otx2_sdp_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx", + q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma); + otx2_sdp_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc); + + + /* OQ info_list set up */ + droq->info_list = sdp_alloc_info_buffer(sdpvf, droq); + if (droq->info_list == NULL) { + otx2_err("memory allocation failed for OQ[%d] info_list", q_no); + goto init_droq_fail; + } + + /* OQ buf_list set up */ + droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list", + (droq->nb_desc * SDP_DROQ_RECVBUF_SIZE), + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (droq->recv_buf_list == NULL) { + otx2_err("OQ recv_buf_list alloc failed"); + goto init_droq_fail; + } + + if (sdp_droq_setup_ring_buffers(sdpvf, droq)) + goto init_droq_fail; + + droq->refill_threshold = c_refill_threshold; + rte_spinlock_init(&droq->lock); + + + /* Set up OQ registers */ + sdpvf->fn_list.setup_oq_regs(sdpvf, q_no); + + sdpvf->io_qmask.oq |= (1ull << q_no); + + return 0; + +init_droq_fail: + return -ENOMEM; +} + +/* OQ configuration and setup */ +int +sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no) +{ + struct sdp_droq *droq; + + /* Allocate new droq. 
*/ + droq = (struct sdp_droq *)rte_zmalloc("sdp_OQ", + sizeof(*droq), RTE_CACHE_LINE_SIZE); + if (droq == NULL) { + otx2_err("Droq[%d] Creation Failed", oq_no); + return -ENOMEM; + } + sdpvf->droq[oq_no] = droq; + + if (sdp_init_droq(sdpvf, oq_no)) { + otx2_err("Droq[%d] Initialization failed", oq_no); + goto delete_OQ; + } + otx2_info("OQ[%d] is created.", oq_no); + + sdpvf->num_oqs++; + + return 0; + +delete_OQ: + sdp_delete_oqs(sdpvf, oq_no); + return -ENOMEM; +} + +static inline void +sdp_iqreq_delete(struct sdp_device *sdpvf, + struct sdp_instr_queue *iq, uint32_t idx) +{ + uint32_t reqtype; + void *buf; + + buf = iq->req_list[idx].buf; + reqtype = iq->req_list[idx].reqtype; + + switch (reqtype) { + case SDP_REQTYPE_NORESP: + rte_mempool_put(sdpvf->enqdeq_mpool, buf); + otx2_sdp_dbg("IQ buffer freed at idx[%d]", idx); + break; + + case SDP_REQTYPE_NORESP_GATHER: + case SDP_REQTYPE_NONE: + default: + otx2_info("This iqreq mode is not supported:%d", reqtype); + + } + + /* Reset the request list at this index */ + iq->req_list[idx].buf = NULL; + iq->req_list[idx].reqtype = 0; +} + +static inline void +sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf, + uint32_t reqtype) +{ + iq->req_list[iq->host_write_index].buf = buf; + iq->req_list[iq->host_write_index].reqtype = reqtype; + + otx2_sdp_dbg("IQ buffer added at idx[%d]", iq->host_write_index); + +} + +static void +sdp_flush_iq(struct sdp_device *sdpvf, + struct sdp_instr_queue *iq, + uint32_t pending_thresh __rte_unused) +{ + uint32_t instr_processed = 0; + + rte_spinlock_lock(&iq->lock); + + iq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq); + while (iq->flush_index != iq->otx_read_index) { + /* Free the IQ data buffer to the pool */ + sdp_iqreq_delete(sdpvf, iq, iq->flush_index); + iq->flush_index = + sdp_incr_index(iq->flush_index, 1, iq->nb_desc); + + instr_processed++; + } + + iq->stats.instr_processed = instr_processed; + rte_atomic64_sub(&iq->instr_pending, instr_processed); + + rte_spinlock_unlock(&iq->lock); +} + +static inline void +sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused, + struct sdp_instr_queue *iq) +{ + otx2_write64(iq->fill_cnt, iq->doorbell_reg); + + /* Make sure doorbell writes observed by HW */ + rte_cio_wmb(); + iq->fill_cnt = 0; + +} + +static inline int +post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd) +{ + uint8_t *iqptr, cmdsize; + + /* This ensures that the read index does not wrap around to + * the same position if queue gets full before OCTEON TX2 could + * fetch any instr. + */ + if (rte_atomic64_read(&iq->instr_pending) >= + (int32_t)(iq->nb_desc - 1)) { + otx2_err("IQ is full, pending:%ld", + (long)rte_atomic64_read(&iq->instr_pending)); + + return SDP_IQ_SEND_FAILED; + } + + /* Copy cmd into iq */ + cmdsize = ((iq->iqcmd_64B) ? 64 : 32); + iqptr = iq->base_addr + (cmdsize * iq->host_write_index); + + rte_memcpy(iqptr, iqcmd, cmdsize); + + otx2_sdp_dbg("IQ cmd posted @ index:%d", iq->host_write_index); + + /* Increment the host write index */ + iq->host_write_index = + sdp_incr_index(iq->host_write_index, 1, iq->nb_desc); + + iq->fill_cnt++; + + /* Flush the command into memory. We need to be sure the data + * is in memory before indicating that the instruction is + * pending. 
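The comment above describes the producer half of a publication protocol. Condensed into a standalone sketch (names are illustrative, not the driver's; assumes one producer and a hardware consumer that polls the pending count):

#include <stdint.h>
#include <string.h>
#include <rte_atomic.h>

/* Minimal sketch of the pattern used by post_iqcmd(): the barrier
 * orders the slot fill before the count increment that advertises it,
 * so the consumer never observes a ready count ahead of the data. */
static void
publish_cmd(uint8_t *slot, const uint8_t *cmd, size_t len,
	    rte_atomic64_t *pending)
{
	memcpy(slot, cmd, len);    /* fill the ring slot            */
	rte_smp_wmb();             /* slot contents visible first   */
	rte_atomic64_inc(pending); /* then advertise the new entry  */
}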
+ */ + rte_smp_wmb(); + rte_atomic64_inc(&iq->instr_pending); + + /* SDP_IQ_SEND_SUCCESS */ + return 0; +} + + +static int +sdp_send_data(struct sdp_device *sdpvf, + struct sdp_instr_queue *iq, void *cmd) +{ + uint32_t ret; + + /* Lock this IQ command queue before posting instruction */ + rte_spinlock_lock(&iq->post_lock); + + /* Submit IQ command */ + ret = post_iqcmd(iq, cmd); + + if (ret == SDP_IQ_SEND_SUCCESS) { + sdp_ring_doorbell(sdpvf, iq); + + iq->stats.instr_posted++; + otx2_sdp_dbg("Instr submit success posted: %ld\n", + (long)iq->stats.instr_posted); + + } else { + iq->stats.instr_dropped++; + otx2_err("Instr submit failed, dropped: %ld\n", + (long)iq->stats.instr_dropped); + + } + + rte_spinlock_unlock(&iq->post_lock); + + return ret; +} + + +/* Enqueue requests/packets to SDP IQ queue. + * returns number of requests enqueued successfully + */ +int +sdp_rawdev_enqueue(struct rte_rawdev *rawdev, + struct rte_rawdev_buf **buffers __rte_unused, + unsigned int count, rte_rawdev_obj_t context) +{ + struct sdp_instr_64B *iqcmd; + struct sdp_instr_queue *iq; + struct sdp_soft_instr *si; + struct sdp_device *sdpvf; + + struct sdp_instr_ih ihx; + + sdpvf = (struct sdp_device *)rawdev->dev_private; + si = (struct sdp_soft_instr *)context; + + iq = sdpvf->instr_queue[si->q_no]; + + if ((count > 1) || (count < 1)) { + otx2_err("This mode not supported: req[%d]", count); + goto enq_fail; + } + + memset(&ihx, 0, sizeof(struct sdp_instr_ih)); + + iqcmd = &si->command; + memset(iqcmd, 0, sizeof(struct sdp_instr_64B)); + + iqcmd->dptr = (uint64_t)si->dptr; + + /* Populate SDP IH */ + ihx.pkind = sdpvf->pkind; + ihx.fsz = si->ih.fsz + 8; /* 8B for NIX IH */ + ihx.gather = si->ih.gather; + + /* Direct data instruction */ + ihx.tlen = si->ih.tlen + ihx.fsz; + + switch (ihx.gather) { + case 0: /* Direct data instr */ + ihx.tlen = si->ih.tlen + ihx.fsz; + break; + + default: /* Gather */ + switch (si->ih.gsz) { + case 0: /* Direct gather instr */ + otx2_err("Direct Gather instr : not supported"); + goto enq_fail; + + default: /* Indirect gather instr */ + otx2_err("Indirect Gather instr : not supported"); + goto enq_fail; + } + } + + rte_memcpy(&iqcmd->ih, &ihx, sizeof(uint64_t)); + iqcmd->rptr = (uint64_t)si->rptr; + rte_memcpy(&iqcmd->irh, &si->irh, sizeof(uint64_t)); + + /* Swap FSZ(front data) here, to avoid swapping on OCTEON TX2 side */ + sdp_swap_8B_data(&iqcmd->rptr, 1); + sdp_swap_8B_data(&iqcmd->irh, 1); + + otx2_sdp_dbg("After swapping"); + otx2_sdp_dbg("Word0 [dptr]: 0x%016lx", (unsigned long)iqcmd->dptr); + otx2_sdp_dbg("Word1 [ihtx]: 0x%016lx", (unsigned long)iqcmd->ih); + otx2_sdp_dbg("Word2 [rptr]: 0x%016lx", (unsigned long)iqcmd->rptr); + otx2_sdp_dbg("Word3 [irh]: 0x%016lx", (unsigned long)iqcmd->irh); + otx2_sdp_dbg("Word4 [exhdr[0]]: 0x%016lx", + (unsigned long)iqcmd->exhdr[0]); + + sdp_iqreq_add(iq, si->dptr, si->reqtype); + + if (sdp_send_data(sdpvf, iq, iqcmd)) { + otx2_err("Data send failed :"); + sdp_iqreq_delete(sdpvf, iq, iq->host_write_index); + goto enq_fail; + } + + if (rte_atomic64_read(&iq->instr_pending) >= 1) + sdp_flush_iq(sdpvf, iq, 1 /*(iq->nb_desc / 2)*/); + + /* Return no# of instructions posted successfully. 
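The sdp_swap_8B_data() calls in the enqueue path above byte-reverse rptr and irh up front so the device side does not have to; the helper (defined in otx2_ep_enqdeq.h, the next file) is an open-coded 64-bit byte reversal, equivalent to DPDK's rte_bswap64(). A quick standalone check of that equivalence:

#include <assert.h>
#include <stdint.h>
#include <rte_byteorder.h>

int main(void)
{
	uint64_t v = 0x0123456789abcdefULL;

	/* Reversing the eight bytes of v gives 0xefcdab8967452301,
	 * which is what both rte_bswap64() and the driver's
	 * sdp_endian_swap_8B() compute. */
	assert(rte_bswap64(v) == 0xefcdab8967452301ULL);
	return 0;
}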
*/ + return count; + +enq_fail: + return SDP_IQ_SEND_FAILED; +} + +static uint32_t +sdp_droq_refill(struct sdp_device *sdpvf, struct sdp_droq *droq) +{ + struct sdp_droq_desc *desc_ring; + uint32_t desc_refilled = 0; + void *buf = NULL; + + desc_ring = droq->desc_ring; + + while (droq->refill_count && (desc_refilled < droq->nb_desc)) { + /* If a valid buffer exists (happens if there is no dispatch), + * reuse the buffer, else allocate. + */ + if (droq->recv_buf_list[droq->refill_idx].buffer != NULL) + break; + + if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) || + (buf == NULL)) { + /* If a buffer could not be allocated, no point in + * continuing + */ + droq->stats.rx_alloc_failure++; + break; + } + + droq->recv_buf_list[droq->refill_idx].buffer = buf; + desc_ring[droq->refill_idx].buffer_ptr = rte_mem_virt2iova(buf); + + /* Reset any previous values in the length field. */ + droq->info_list[droq->refill_idx].length = 0; + + droq->refill_idx = sdp_incr_index(droq->refill_idx, 1, + droq->nb_desc); + + desc_refilled++; + droq->refill_count--; + + } + + return desc_refilled; +} + +static int +sdp_droq_read_packet(struct sdp_device *sdpvf __rte_unused, + struct sdp_droq *droq, + struct sdp_droq_pkt *droq_pkt) +{ + struct sdp_droq_info *info; + uint32_t total_len = 0; + uint32_t pkt_len = 0; + + info = &droq->info_list[droq->read_idx]; + sdp_swap_8B_data((uint64_t *)&info->length, 1); + if (!info->length) { + otx2_err("OQ info_list->length[%ld]", (long)info->length); + goto oq_read_fail; + } + + /* Deduce the actual data size */ + info->length -= SDP_RH_SIZE; + total_len += (uint32_t)info->length; + + otx2_sdp_dbg("OQ: pkt_len[%ld], buffer_size %d", + (long)info->length, droq->buffer_size); + if (info->length > droq->buffer_size) { + otx2_err("This mode is not supported: pkt_len > buffer_size"); + goto oq_read_fail; + } + + if (info->length <= droq->buffer_size) { + pkt_len = (uint32_t)info->length; + droq_pkt->data = droq->recv_buf_list[droq->read_idx].buffer; + droq_pkt->len = pkt_len; + + droq->recv_buf_list[droq->read_idx].buffer = NULL; + droq->read_idx = sdp_incr_index(droq->read_idx, 1,/* count */ + droq->nb_desc /* max rd idx */); + droq->refill_count++; + + } + + info->length = 0; + + return SDP_OQ_RECV_SUCCESS; + +oq_read_fail: + return SDP_OQ_RECV_FAILED; +} + +static inline uint32_t +sdp_check_droq_pkts(struct sdp_droq *droq, uint32_t burst_size) +{ + uint32_t min_pkts = 0; + uint32_t new_pkts; + uint32_t pkt_count; + + /* Latest available OQ packets */ + pkt_count = rte_read32(droq->pkts_sent_reg); + + /* Newly arrived packets */ + new_pkts = pkt_count - droq->last_pkt_count; + otx2_sdp_dbg("Recvd [%d] new OQ pkts", new_pkts); + + min_pkts = (new_pkts > burst_size) ? 
burst_size : new_pkts; + if (min_pkts) { + rte_atomic64_add(&droq->pkts_pending, min_pkts); + /* Back up the aggregated packet count so far */ + droq->last_pkt_count += min_pkts; + } + + return min_pkts; +} + +/* Check for response arrival from OCTEON TX2 + * returns number of requests completed + */ +int +sdp_rawdev_dequeue(struct rte_rawdev *rawdev, + struct rte_rawdev_buf **buffers, unsigned int count, + rte_rawdev_obj_t context __rte_unused) +{ + struct sdp_droq_pkt *oq_pkt; + struct sdp_device *sdpvf; + struct sdp_droq *droq; + + uint32_t q_no = 0, pkts; + uint32_t new_pkts; + uint32_t ret; + + sdpvf = (struct sdp_device *)rawdev->dev_private; + + droq = sdpvf->droq[q_no]; + if (!droq) { + otx2_err("Invalid droq[%d]", q_no); + goto droq_err; + } + + /* Grab the lock */ + rte_spinlock_lock(&droq->lock); + + new_pkts = sdp_check_droq_pkts(droq, count); + if (!new_pkts) { + otx2_sdp_dbg("Zero new_pkts:%d", new_pkts); + goto deq_fail; /* No pkts at this moment */ + } + + otx2_sdp_dbg("Received new_pkts = %d", new_pkts); + + for (pkts = 0; pkts < new_pkts; pkts++) { + + /* Push the received pkt to application */ + oq_pkt = (struct sdp_droq_pkt *)buffers[pkts]; + + ret = sdp_droq_read_packet(sdpvf, droq, oq_pkt); + if (ret) { + otx2_err("DROQ read pakt failed."); + goto deq_fail; + } + + /* Stats */ + droq->stats.pkts_received++; + droq->stats.bytes_received += oq_pkt->len; + } + + /* Ack the h/w with no# of pkts read by Host */ + rte_write32(pkts, droq->pkts_sent_reg); + rte_cio_wmb(); + + droq->last_pkt_count -= pkts; + + otx2_sdp_dbg("DROQ pkts[%d] pushed to application", pkts); + + /* Refill DROQ buffers */ + if (droq->refill_count >= 2 /* droq->refill_threshold */) { + int desc_refilled = sdp_droq_refill(sdpvf, droq); + + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory is + * accurate. + */ + rte_write32(desc_refilled, droq->pkts_credit_reg); + + /* Ensure mmio write completes */ + rte_wmb(); + otx2_sdp_dbg("Refilled count = %d", desc_refilled); + } + + /* Release the spin lock */ + rte_spinlock_unlock(&droq->lock); + + return pkts; + +deq_fail: + rte_spinlock_unlock(&droq->lock); + +droq_err: + return SDP_OQ_RECV_FAILED; +} diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h new file mode 100644 index 000000000..172fdc556 --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#ifndef _OTX2_EP_ENQDEQ_H_ +#define _OTX2_EP_ENQDEQ_H_ + +#include <rte_byteorder.h> +#include "otx2_ep_rawdev.h" + +#define SDP_IQ_SEND_FAILED (-1) +#define SDP_IQ_SEND_SUCCESS (0) + +#define SDP_OQ_RECV_FAILED (-1) +#define SDP_OQ_RECV_SUCCESS (0) + +static inline uint64_t +sdp_endian_swap_8B(uint64_t _d) +{ + return ((((((uint64_t)(_d)) >> 0) & (uint64_t)0xff) << 56) | + (((((uint64_t)(_d)) >> 8) & (uint64_t)0xff) << 48) | + (((((uint64_t)(_d)) >> 16) & (uint64_t)0xff) << 40) | + (((((uint64_t)(_d)) >> 24) & (uint64_t)0xff) << 32) | + (((((uint64_t)(_d)) >> 32) & (uint64_t)0xff) << 24) | + (((((uint64_t)(_d)) >> 40) & (uint64_t)0xff) << 16) | + (((((uint64_t)(_d)) >> 48) & (uint64_t)0xff) << 8) | + (((((uint64_t)(_d)) >> 56) & (uint64_t)0xff) << 0)); +} + +static inline void +sdp_swap_8B_data(uint64_t *data, uint32_t blocks) +{ + /* Swap 8B blocks */ + while (blocks) { + *data = sdp_endian_swap_8B(*data); + blocks--; + data++; + } +} + +static inline uint32_t +sdp_incr_index(uint32_t index, uint32_t count, uint32_t max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} + +#endif /* _OTX2_EP_ENQDEQ_H_ */ diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c new file mode 100644 index 000000000..0778603d5 --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ +#include <string.h> +#include <unistd.h> + +#include <rte_bus.h> +#include <rte_bus_pci.h> +#include <rte_eal.h> +#include <rte_lcore.h> +#include <rte_mempool.h> +#include <rte_pci.h> + +#include <rte_common.h> +#include <rte_rawdev.h> +#include <rte_rawdev_pmd.h> + +#include "otx2_common.h" +#include "otx2_ep_rawdev.h" +#include "otx2_ep_vf.h" + +static const struct rte_pci_id pci_sdp_vf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_EP_VF) + }, + { + .vendor_id = 0, + }, +}; + +/* SDP_VF default configuration */ +const struct sdp_config default_sdp_conf = { + /* IQ attributes */ + .iq = { + .max_iqs = SDP_VF_CFG_IO_QUEUES, + .instr_type = SDP_VF_64BYTE_INSTR, + .pending_list_size = (SDP_VF_MAX_IQ_DESCRIPTORS * + SDP_VF_CFG_IO_QUEUES), + }, + + /* OQ attributes */ + .oq = { + .max_oqs = SDP_VF_CFG_IO_QUEUES, + .info_ptr = SDP_VF_OQ_INFOPTR_MODE, + .refill_threshold = SDP_VF_OQ_REFIL_THRESHOLD, + }, + + .num_iqdef_descs = SDP_VF_MAX_IQ_DESCRIPTORS, + .num_oqdef_descs = SDP_VF_MAX_OQ_DESCRIPTORS, + .oqdef_buf_size = SDP_VF_OQ_BUF_SIZE, + +}; + +const struct sdp_config* +sdp_get_defconf(struct sdp_device *sdp_dev __rte_unused) +{ + const struct sdp_config *default_conf = NULL; + + default_conf = &default_sdp_conf; + + return default_conf; +} + +static int +sdp_vfdev_exit(struct rte_rawdev *rawdev) +{ + struct sdp_device *sdpvf; + uint32_t rawdev_queues, q; + + otx2_info("%s:", __func__); + + sdpvf = (struct sdp_device *)rawdev->dev_private; + + sdpvf->fn_list.disable_io_queues(sdpvf); + + rawdev_queues = sdpvf->num_oqs; + for (q = 0; q < rawdev_queues; q++) { + if (sdp_delete_oqs(sdpvf, q)) { + otx2_err("Failed to delete OQ:%d", q); + return -ENOMEM; + } + } + otx2_info("Num OQs:%d freed", sdpvf->num_oqs); + + /* Free the oqbuf_pool */ + rte_mempool_free(sdpvf->enqdeq_mpool); + sdpvf->enqdeq_mpool = NULL; + + otx2_info("Enqdeq_mpool free done"); + + rawdev_queues = sdpvf->num_iqs; + for (q = 0; q < rawdev_queues; q++) { + if 
(sdp_delete_iqs(sdpvf, q)) { + otx2_err("Failed to delete IQ:%d", q); + return -ENOMEM; + } + } + otx2_sdp_dbg("Num IQs:%d freed", sdpvf->num_iqs); + + return 0; +} + +static int +sdp_chip_specific_setup(struct sdp_device *sdpvf) +{ + struct rte_pci_device *pdev = sdpvf->pci_dev; + uint32_t dev_id = pdev->id.device_id; + int ret; + + switch (dev_id) { + case PCI_DEVID_OCTEONTX2_EP_VF: + sdpvf->chip_id = PCI_DEVID_OCTEONTX2_EP_VF; + ret = sdp_vf_setup_device(sdpvf); + + break; + default: + otx2_err("Unsupported device"); + ret = -EINVAL; + } + + if (!ret) + otx2_info("SDP dev_id[%d]", dev_id); + + return ret; +} + +/* SDP VF device initialization */ +static int +sdp_vfdev_init(struct sdp_device *sdpvf) +{ + uint32_t rawdev_queues, q; + + if (sdp_chip_specific_setup(sdpvf)) { + otx2_err("Chip specific setup failed"); + goto setup_fail; + } + + if (sdpvf->fn_list.setup_device_regs(sdpvf)) { + otx2_err("Failed to configure device registers"); + goto setup_fail; + } + + rawdev_queues = (uint32_t)(sdpvf->sriov_info.rings_per_vf); + + /* Rawdev queues setup for enqueue/dequeue */ + for (q = 0; q < rawdev_queues; q++) { + if (sdp_setup_iqs(sdpvf, q)) { + otx2_err("Failed to setup IQs"); + goto iq_fail; + } + } + otx2_info("Total[%d] IQs setup", sdpvf->num_iqs); + + for (q = 0; q < rawdev_queues; q++) { + if (sdp_setup_oqs(sdpvf, q)) { + otx2_err("Failed to setup OQs"); + goto oq_fail; + } + } + otx2_info("Total [%d] OQs setup", sdpvf->num_oqs); + + /* Enable IQ/OQ for this device */ + sdpvf->fn_list.enable_io_queues(sdpvf); + + /* Send OQ desc credits for OQs, credits are always + * sent after the OQs are enabled. + */ + for (q = 0; q < rawdev_queues; q++) { + rte_write32(sdpvf->droq[q]->nb_desc, + sdpvf->droq[q]->pkts_credit_reg); + + rte_io_mb(); + otx2_info("OQ[%d] dbells [%d]", q, + rte_read32(sdpvf->droq[q]->pkts_credit_reg)); + } + + rte_wmb(); + + otx2_info("SDP Device is Ready"); + + return 0; + +/* Error handling */ +oq_fail: + /* Free the allocated OQs */ + for (q = 0; q < sdpvf->num_oqs; q++) + sdp_delete_oqs(sdpvf, q); + +iq_fail: + /* Free the allocated IQs */ + for (q = 0; q < sdpvf->num_iqs; q++) + sdp_delete_iqs(sdpvf, q); + +setup_fail: + return -ENOMEM; +} + +static int +sdp_rawdev_start(struct rte_rawdev *dev) +{ + dev->started = 1; + + return 0; +} + +static void +sdp_rawdev_stop(struct rte_rawdev *dev) +{ + dev->started = 0; +} + +static int +sdp_rawdev_close(struct rte_rawdev *dev) +{ + int ret; + ret = sdp_vfdev_exit(dev); + if (ret) { + otx2_err(" SDP_EP rawdev exit error"); + return ret; + } + + return 0; +} + +static int +sdp_rawdev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config) +{ + struct sdp_rawdev_info *app_info = (struct sdp_rawdev_info *)config; + struct sdp_device *sdpvf; + + if (app_info == NULL) { + otx2_err("Application config info [NULL]"); + return -EINVAL; + } + + sdpvf = (struct sdp_device *)dev->dev_private; + + sdpvf->conf = app_info->app_conf; + sdpvf->enqdeq_mpool = app_info->enqdeq_mpool; + + sdp_vfdev_init(sdpvf); + + return 0; + +} + +/* SDP VF endpoint rawdev ops */ +static const struct rte_rawdev_ops sdp_rawdev_ops = { + .dev_configure = sdp_rawdev_configure, + .dev_start = sdp_rawdev_start, + .dev_stop = sdp_rawdev_stop, + .dev_close = sdp_rawdev_close, + .enqueue_bufs = sdp_rawdev_enqueue, + .dequeue_bufs = sdp_rawdev_dequeue, + .dev_selftest = sdp_rawdev_selftest, +}; + +static int +otx2_sdp_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_RAWDEV_NAME_MAX_LEN]; + 
struct sdp_device *sdpvf = NULL; + struct rte_rawdev *sdp_rawdev; + uint16_t vf_id; + + /* Single process support */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr) + otx2_info("SDP_EP BAR0 is mapped:"); + else { + otx2_err("SDP_EP: Failed to map device BARs"); + otx2_err("BAR0 %p\n BAR2 %p", + pci_dev->mem_resource[0].addr, + pci_dev->mem_resource[2].addr); + return -ENODEV; + } + + memset(name, 0, sizeof(name)); + snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "SDPEP:%x:%02x.%x", + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function); + + /* Allocate rawdev pmd */ + sdp_rawdev = rte_rawdev_pmd_allocate(name, + sizeof(struct sdp_device), + rte_socket_id()); + + if (sdp_rawdev == NULL) { + otx2_err("SDP_EP VF rawdev allocation failed"); + return -ENOMEM; + } + + sdp_rawdev->dev_ops = &sdp_rawdev_ops; + sdp_rawdev->device = &pci_dev->device; + sdp_rawdev->driver_name = pci_dev->driver->driver.name; + + sdpvf = (struct sdp_device *)sdp_rawdev->dev_private; + sdpvf->hw_addr = pci_dev->mem_resource[0].addr; + sdpvf->pci_dev = pci_dev; + + /* Discover the VF number being probed */ + vf_id = ((pci_dev->addr.devid & 0x1F) << 3) | + (pci_dev->addr.function & 0x7); + + vf_id -= 1; + sdpvf->vf_num = vf_id; + + otx2_info("SDP_EP VF[%d] probe done", vf_id); + + return 0; +} + +static int +otx2_sdp_rawdev_remove(struct rte_pci_device *pci_dev) +{ + char name[RTE_RAWDEV_NAME_MAX_LEN]; + struct rte_rawdev *rawdev; + struct sdp_device *sdpvf; + + /* Single process support */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev == NULL) { + otx2_err("SDP_EP:invalid pci_dev!"); + return -EINVAL; + } + + + memset(name, 0, sizeof(name)); + snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "SDPEP:%x:%02x.%x", + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function); + + rawdev = rte_rawdev_pmd_get_named_dev(name); + if (rawdev == NULL) { + otx2_err("SDP_EP: invalid device name (%s)", name); + return -EINVAL; + } + + sdpvf = (struct sdp_device *)rawdev->dev_private; + otx2_info("Removing SDP_EP VF[%d] ", sdpvf->vf_num); + + /* rte_rawdev_close is called by pmd_release */ + return rte_rawdev_pmd_release(rawdev); +} + +static struct rte_pci_driver rte_sdp_rawdev_pmd = { + .id_table = pci_sdp_vf_map, + .drv_flags = (RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA), + .probe = otx2_sdp_rawdev_probe, + .remove = otx2_sdp_rawdev_remove, +}; + +RTE_PMD_REGISTER_PCI(sdp_rawdev_pci_driver, rte_sdp_rawdev_pmd); +RTE_PMD_REGISTER_PCI_TABLE(sdp_rawdev_pci_driver, pci_sdp_vf_map); +RTE_PMD_REGISTER_KMOD_DEP(sdp_rawdev_pci_driver, "vfio-pci"); diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h new file mode 100644 index 000000000..dab2fb754 --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h @@ -0,0 +1,499 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#ifndef _OTX2_EP_RAWDEV_H_ +#define _OTX2_EP_RAWDEV_H_ + +#include <rte_byteorder.h> +#include <rte_spinlock.h> + +/* IQ instruction req types */ +#define SDP_REQTYPE_NONE (0) +#define SDP_REQTYPE_NORESP (1) +#define SDP_REQTYPE_NORESP_GATHER (2) + +/* Input Request Header format */ +struct sdp_instr_irh { + /* Request ID */ + uint64_t rid:16; + + /* PCIe port to use for response */ + uint64_t pcie_port:3; + + /* Scatter indicator 1=scatter */ + uint64_t scatter:1; + + /* Size of Expected result OR no. 
of entries in scatter list */ + uint64_t rlenssz:14; + + /* Desired destination port for result */ + uint64_t dport:6; + + /* Opcode Specific parameters */ + uint64_t param:8; + + /* Opcode for the return packet */ + uint64_t opcode:16; +}; + +/* SDP 32B instruction format */ +struct sdp_instr_32B { + /* Pointer where the input data is available. */ + uint64_t dptr; + + /* SDP Instruction Header. */ + uint64_t ih; + + /** Pointer where the response for a RAW mode packet + * will be written by OCTEON TX2. + */ + uint64_t rptr; + + /* Input Request Header. Additional info about the input. */ + uint64_t irh; +}; +#define SDP_32B_INSTR_SIZE (sizeof(sdp_instr_32B)) + +/* SDP 64B instruction format */ +struct sdp_instr_64B { + /* Pointer where the input data is available. */ + uint64_t dptr; + + /* SDP Instruction Header. */ + uint64_t ih; + + /** Pointer where the response for a RAW mode packet + * will be written by OCTEON TX2. + */ + uint64_t rptr; + + /* Input Request Header. */ + uint64_t irh; + + /* Additional headers available in a 64-byte instruction. */ + uint64_t exhdr[4]; +}; +#define SDP_64B_INSTR_SIZE (sizeof(sdp_instr_64B)) + +struct sdp_soft_instr { + /** Input data pointer. It is either pointing directly to input data + * or to a gather list. + */ + void *dptr; + + /** Response from OCTEON TX2 comes at this address. It is either + * directlty pointing to output data buffer or to a scatter list. + */ + void *rptr; + + /* The instruction header. All input commands have this field. */ + struct sdp_instr_ih ih; + + /* Input request header. */ + struct sdp_instr_irh irh; + + /** The PCI instruction to be sent to OCTEON TX2. This is stored in the + * instr to retrieve the physical address of buffers when instr is + * freed. + */ + struct sdp_instr_64B command; + + /** If a gather list was allocated, this ptr points to the buffer used + * for the gather list. The gather list has to be 8B aligned, so this + * value may be different from dptr. + */ + void *gather_ptr; + + /* Total data bytes transferred in the gather mode request. */ + uint64_t gather_bytes; + + /** If a scatter list was allocated, this ptr points to the buffer used + * for the scatter list. The scatter list has to be 8B aligned, so + * this value may be different from rptr. + */ + void *scatter_ptr; + + /* Total data bytes to be received in the scatter mode request. */ + uint64_t scatter_bytes; + + /* IQ number to which this instruction has to be submitted. */ + uint32_t q_no; + + /* IQ instruction request type. */ + uint32_t reqtype; +}; +#define SDP_SOFT_INSTR_SIZE (sizeof(sdp_soft_instr)) + +/* SDP IQ request list */ +struct sdp_instr_list { + void *buf; + uint32_t reqtype; +}; +#define SDP_IQREQ_LIST_SIZE (sizeof(struct sdp_instr_list)) + +/* Input Queue statistics. Each input queue has four stats fields. */ +struct sdp_iq_stats { + uint64_t instr_posted; /* Instructions posted to this queue. */ + uint64_t instr_processed; /* Instructions processed in this queue. */ + uint64_t instr_dropped; /* Instructions that could not be processed */ +}; + +/* Structure to define the configuration attributes for each Input queue. */ +struct sdp_iq_config { + /* Max number of IQs available */ + uint16_t max_iqs; + + /* Command size - 32 or 64 bytes */ + uint16_t instr_type; + + /* Pending list size, usually set to the sum of the size of all IQs */ + uint32_t pending_list_size; +}; + +/** The instruction (input) queue. 
+ * The input queue is used to post raw (instruction) mode data or packet data + * to OCTEON TX2 device from the host. Each IQ of a SDP EP VF device has one + * such structure to represent it. + */ +struct sdp_instr_queue { + /* A spinlock to protect access to the input ring. */ + rte_spinlock_t lock; + rte_spinlock_t post_lock; + + struct sdp_device *sdp_dev; + rte_atomic64_t iq_flush_running; + + uint32_t q_no; + uint32_t pkt_in_done; + + /* Flag for 64 byte commands. */ + uint32_t iqcmd_64B:1; + uint32_t rsvd:17; + uint32_t status:8; + + /* Number of descriptors in this ring. */ + uint32_t nb_desc; + + /* Input ring index, where the driver should write the next packet */ + uint32_t host_write_index; + + /* Input ring index, where the OCTEON TX2 should read the next packet */ + uint32_t otx_read_index; + + /** This index aids in finding the window in the queue where OCTEON TX2 + * has read the commands. + */ + uint32_t flush_index; + + /* This keeps track of the instructions pending in this queue. */ + rte_atomic64_t instr_pending; + + uint32_t reset_instr_cnt; + + /* Pointer to the Virtual Base addr of the input ring. */ + uint8_t *base_addr; + + /* This IQ request list */ + struct sdp_instr_list *req_list; + + /* SDP doorbell register for the ring. */ + void *doorbell_reg; + + /* SDP instruction count register for this ring. */ + void *inst_cnt_reg; + + /* Number of instructions pending to be posted to OCTEON TX2. */ + uint32_t fill_cnt; + + /* Statistics for this input queue. */ + struct sdp_iq_stats stats; + + /* DMA mapped base address of the input descriptor ring. */ + uint64_t base_addr_dma; + + /* Memory zone */ + const struct rte_memzone *iq_mz; +}; + +/* DROQ packet format for application i/f. */ +struct sdp_droq_pkt { + /* DROQ packet data buffer pointer. */ + uint8_t *data; + + /* DROQ packet data length */ + uint32_t len; + + uint32_t misc; +}; + +/** Descriptor format. + * The descriptor ring is made of descriptors which have 2 64-bit values: + * -# Physical (bus) address of the data buffer. + * -# Physical (bus) address of a sdp_droq_info structure. + * The device DMA's incoming packets and its information at the address + * given by these descriptor fields. + */ +struct sdp_droq_desc { + /* The buffer pointer */ + uint64_t buffer_ptr; + + /* The Info pointer */ + uint64_t info_ptr; +}; +#define SDP_DROQ_DESC_SIZE (sizeof(struct sdp_droq_desc)) + +/* Receive Header */ +union sdp_rh { + uint64_t rh64; +}; +#define SDP_RH_SIZE (sizeof(union sdp_rh)) + +/** Information about packet DMA'ed by OCTEON TX2. + * The format of the information available at Info Pointer after OCTEON TX2 + * has posted a packet. Not all descriptors have valid information. Only + * the Info field of the first descriptor for a packet has information + * about the packet. + */ +struct sdp_droq_info { + /* The Output Receive Header. */ + union sdp_rh rh; + + /* The Length of the packet. */ + uint64_t length; +}; +#define SDP_DROQ_INFO_SIZE (sizeof(struct sdp_droq_info)) + +/** Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the OCTEON TX2 device. Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. + */ +struct sdp_recv_buffer { + /* Packet buffer, including meta data. */ + void *buffer; + + /* Data in the packet buffer. */ + /* uint8_t *data; */ +}; +#define SDP_DROQ_RECVBUF_SIZE (sizeof(struct sdp_recv_buffer)) + +/* DROQ statistics. 
Each output queue has four stats fields. */ +struct sdp_droq_stats { + /* Number of packets received in this queue. */ + uint64_t pkts_received; + + /* Bytes received by this queue. */ + uint64_t bytes_received; + + /* Num of failures of rte_pktmbuf_alloc() */ + uint64_t rx_alloc_failure; +}; + +/* Structure to define the configuration attributes for each Output queue. */ +struct sdp_oq_config { + /* Max number of OQs available */ + uint16_t max_oqs; + + /* If set, the Output queue uses info-pointer mode. (Default: 1 ) */ + uint16_t info_ptr; + + /** The number of buffers that were consumed during packet processing by + * the driver on this Output queue before the driver attempts to + * replenish the descriptor ring with new buffers. + */ + uint32_t refill_threshold; +}; + +/* The Descriptor Ring Output Queue(DROQ) structure. */ +struct sdp_droq { + /* A spinlock to protect access to this ring. */ + rte_spinlock_t lock; + + struct sdp_device *sdp_dev; + /* The 8B aligned descriptor ring starts at this address. */ + struct sdp_droq_desc *desc_ring; + + uint32_t q_no; + uint32_t last_pkt_count; + + /* Driver should read the next packet at this index */ + uint32_t read_idx; + + /* OCTEON TX2 will write the next packet at this index */ + uint32_t write_idx; + + /* At this index, the driver will refill the descriptor's buffer */ + uint32_t refill_idx; + + /* Packets pending to be processed */ + rte_atomic64_t pkts_pending; + + /* Number of descriptors in this ring. */ + uint32_t nb_desc; + + /* The number of descriptors pending to refill. */ + uint32_t refill_count; + + uint32_t refill_threshold; + + /* The 8B aligned info ptrs begin from this address. */ + struct sdp_droq_info *info_list; + + /* receive buffer list contains virtual addresses of the buffers. */ + struct sdp_recv_buffer *recv_buf_list; + + /* The size of each buffer pointed by the buffer pointer. */ + uint32_t buffer_size; + + /** Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register + */ + void *pkts_credit_reg; + + /** Pointer to the mapped packet sent register. OCTEON TX2 writes the + * number of packets DMA'ed to host memory in this register. + */ + void *pkts_sent_reg; + + /* Statistics for this DROQ. */ + struct sdp_droq_stats stats; + + /* DMA mapped address of the DROQ descriptor ring. */ + size_t desc_ring_dma; + + /* Info_ptr list is allocated at this virtual address. */ + size_t info_base_addr; + + /* DMA mapped address of the info list */ + size_t info_list_dma; + + /* Allocated size of info list. */ + uint32_t info_alloc_size; + + /* Memory zone **/ + const struct rte_memzone *desc_ring_mz; + const struct rte_memzone *info_mz; +}; +#define SDP_DROQ_SIZE (sizeof(struct sdp_droq)) + +/* IQ/OQ mask */ +struct sdp_io_enable { + uint64_t iq; + uint64_t oq; + uint64_t iq64B; +}; + +/* Structure to define the configuration. */ +struct sdp_config { + /* Input Queue attributes. */ + struct sdp_iq_config iq; + + /* Output Queue attributes. 
*/ + struct sdp_oq_config oq; + + /* Num of desc for IQ rings */ + uint32_t num_iqdef_descs; + + /* Num of desc for OQ rings */ + uint32_t num_oqdef_descs; + + /* OQ buffer size */ + uint32_t oqdef_buf_size; +}; + +/* Required functions for each VF device */ +struct sdp_fn_list { + void (*setup_iq_regs)(struct sdp_device *sdpvf, uint32_t q_no); + void (*setup_oq_regs)(struct sdp_device *sdpvf, uint32_t q_no); + + int (*setup_device_regs)(struct sdp_device *sdpvf); + uint32_t (*update_iq_read_idx)(struct sdp_instr_queue *iq); + + void (*enable_io_queues)(struct sdp_device *sdpvf); + void (*disable_io_queues)(struct sdp_device *sdpvf); + + void (*enable_iq)(struct sdp_device *sdpvf, uint32_t q_no); + void (*disable_iq)(struct sdp_device *sdpvf, uint32_t q_no); + + void (*enable_oq)(struct sdp_device *sdpvf, uint32_t q_no); + void (*disable_oq)(struct sdp_device *sdpvf, uint32_t q_no); +}; + +/* SRIOV information */ +struct sdp_sriov_info { + /* Number of rings assigned to VF */ + uint32_t rings_per_vf; + + /* Number of VF devices enabled */ + uint32_t num_vfs; +}; + + +/* Information to be passed from application */ +struct sdp_rawdev_info { + struct rte_mempool *enqdeq_mpool; + const struct sdp_config *app_conf; +}; + +/* SDP EP VF device */ +struct sdp_device { + /* PCI device pointer */ + struct rte_pci_device *pci_dev; + uint16_t chip_id; + uint16_t pf_num; + uint16_t vf_num; + + /* This device's PCIe port used for traffic. */ + uint16_t pcie_port; + uint32_t pkind; + + /* The state of this device */ + rte_atomic64_t status; + + /* Memory mapped h/w address */ + uint8_t *hw_addr; + + struct sdp_fn_list fn_list; + + /* Num IQs */ + uint32_t num_iqs; + + /* The input instruction queues */ + struct sdp_instr_queue *instr_queue[SDP_VF_MAX_IOQS_PER_RAWDEV]; + + /* Num OQs */ + uint32_t num_oqs; + + /* The DROQ output queues */ + struct sdp_droq *droq[SDP_VF_MAX_IOQS_PER_RAWDEV]; + + /* IOQ data buffer pool */ + struct rte_mempool *enqdeq_mpool; + + /* IOQ mask */ + struct sdp_io_enable io_qmask; + + /* SR-IOV info */ + struct sdp_sriov_info sriov_info; + + /* Device configuration */ + const struct sdp_config *conf; +}; + +const struct sdp_config *sdp_get_defconf(struct sdp_device *sdp_dev); +int sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no); +int sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no); + +int sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no); +int sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no); + +int sdp_rawdev_enqueue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers, + unsigned int count, rte_rawdev_obj_t context); +int sdp_rawdev_dequeue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers, + unsigned int count, rte_rawdev_obj_t context); + +int sdp_rawdev_selftest(uint16_t dev_id); + +#endif /* _OTX2_EP_RAWDEV_H_ */ diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c new file mode 100644 index 000000000..091f1827c --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_test.c @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
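The file that begins here implements the dev_selftest hook wired into sdp_rawdev_ops. For reference, an application would normally reach it through the generic rawdev API rather than calling it directly; a minimal sketch, assuming the SDP EP device was probed as rawdev id 0:

#include <rte_eal.h>
#include <rte_rawdev.h>

int main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Dispatches to sdp_rawdev_selftest() via the dev_selftest
	 * callback registered in sdp_rawdev_ops. */
	return rte_rawdev_selftest(0);
}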
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <rte_common.h> +#include <rte_eal.h> +#include <rte_lcore.h> +#include <rte_mempool.h> + +#include <rte_rawdev.h> +#include <rte_rawdev_pmd.h> + +#include "otx2_common.h" +#include "otx2_ep_rawdev.h" + +#define SDP_IOQ_NUM_BUFS (4 * 1024) +#define SDP_IOQ_BUF_SIZE (2 * 1024) + +#define SDP_TEST_PKT_FSZ (0) +#define SDP_TEST_PKT_SIZE (1024) + +static int +sdp_validate_data(struct sdp_droq_pkt *oq_pkt, uint8_t *iq_pkt, + uint32_t pkt_len) +{ + if (!oq_pkt) + return -EINVAL; + + if (pkt_len != oq_pkt->len) { + otx2_err("Invalid packet length"); + return -EINVAL; + } + + if (memcmp(oq_pkt->data, iq_pkt, pkt_len) != 0) { + otx2_err("Data validation failed"); + return -EINVAL; + } + otx2_sdp_dbg("Data validation successful"); + + return 0; +} + +static void +sdp_ioq_buffer_fill(uint8_t *addr, uint32_t len) +{ + uint32_t idx; + + memset(addr, 0, len); + + for (idx = 0; idx < len; idx++) + addr[idx] = idx; +} + +static struct rte_mempool* +sdp_ioq_mempool_create(void) +{ + struct rte_mempool *mpool; + + mpool = rte_mempool_create("ioqbuf_pool", + SDP_IOQ_NUM_BUFS /*num elt*/, + SDP_IOQ_BUF_SIZE /*elt size*/, + 0 /*cache_size*/, + 0 /*private_data_size*/, + NULL /*mp_init*/, + NULL /*mp_init arg*/, + NULL /*obj_init*/, + NULL /*obj_init arg*/, + rte_socket_id() /*socket id*/, + (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)); + + return mpool; +} + + +int +sdp_rawdev_selftest(uint16_t dev_id) +{ + struct sdp_rawdev_info app_info = {0}; + struct rte_rawdev_info dev_info = {0}; + + struct rte_rawdev_buf *d_buf[1]; + struct sdp_droq_pkt oq_pkt; + struct sdp_soft_instr si; + struct sdp_device sdpvf; + + uint32_t buf_size; + int ret = 0; + void *buf; + + otx2_info("SDP RAWDEV Self Test: Started"); + + memset(&oq_pkt, 0x00, sizeof(oq_pkt)); + d_buf[0] = (struct rte_rawdev_buf *)&oq_pkt; + + struct rte_mempool *ioq_mpool = sdp_ioq_mempool_create(); + if (!ioq_mpool) { + otx2_err("IOQ mpool creation failed"); + return -ENOMEM; + } + + app_info.enqdeq_mpool = ioq_mpool; + app_info.app_conf = NULL; /* Use default conf */ + + dev_info.dev_private = &app_info; + + ret = rte_rawdev_configure(dev_id, &dev_info); + if (ret) { + otx2_err("Unable to configure SDP_VF %d", dev_id); + rte_mempool_free(ioq_mpool); + return -ENOMEM; + } + otx2_info("SDP VF rawdev[%d] configured successfully", dev_id); + + memset(&si, 0x00, sizeof(si)); + memset(&sdpvf, 0x00, sizeof(sdpvf)); + + buf_size = SDP_TEST_PKT_SIZE; + + si.q_no = 0; + si.reqtype = SDP_REQTYPE_NORESP; + si.rptr = NULL; + + si.ih.fsz = SDP_TEST_PKT_FSZ; + si.ih.tlen = buf_size; + si.ih.gather = 0; + + /* Enqueue raw pkt data */ + rte_mempool_get(ioq_mpool, &buf); + if (!buf) { + otx2_err("Buffer allocation failed"); + rte_mempool_free(ioq_mpool); + rte_rawdev_close(dev_id); + return -ENOMEM; + } + + sdp_ioq_buffer_fill(buf, buf_size); + si.dptr = (uint8_t *)buf; + + rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &si); + usleep(10000); + + /* Dequeue raw pkt data */ + ret = 0; + while (ret < 1) { + ret = rte_rawdev_dequeue_buffers(dev_id, &d_buf[0], 1, &si); + rte_pause(); + } + + /* Validate the dequeued raw pkt data */ + if (sdp_validate_data((struct sdp_droq_pkt *)d_buf[0], + buf, buf_size) != 0) { + otx2_err("Data invalid"); + rte_mempool_put(ioq_mpool, + ((struct sdp_droq_pkt *)d_buf[0])->data); + rte_mempool_free(ioq_mpool); + rte_rawdev_close(dev_id); + return -EINVAL; + } + + rte_mempool_put(ioq_mpool, ((struct sdp_droq_pkt *)d_buf[0])->data); + 
rte_mempool_free(ioq_mpool); + rte_rawdev_close(dev_id); + + otx2_info("SDP RAWDEV Self Test: Successful"); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c new file mode 100644 index 000000000..bf2a19e36 --- /dev/null +++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.c @@ -0,0 +1,475 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#include <rte_common.h> +#include <rte_rawdev.h> +#include <rte_rawdev_pmd.h> + +#include "otx2_common.h" +#include "otx2_ep_rawdev.h" +#include "otx2_ep_vf.h" + +static int +sdp_vf_reset_iq(struct sdp_device *sdpvf, int q_no) +{ + uint64_t loop = SDP_VF_BUSY_LOOP_COUNT; + volatile uint64_t d64 = 0ull; + + /* There is no RST for a ring. + * Clear all registers one by one after disabling the ring + */ + + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no)); + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_BADDR(q_no)); + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(q_no)); + + d64 = 0xFFFFFFFF; /* ~0ull */ + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no)); + d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no)); + + while ((d64 != 0) && loop--) { + otx2_write64(d64, sdpvf->hw_addr + + SDP_VF_R_IN_INSTR_DBELL(q_no)); + + rte_delay_ms(1); + + d64 = otx2_read64(sdpvf->hw_addr + + SDP_VF_R_IN_INSTR_DBELL(q_no)); + } + + loop = SDP_VF_BUSY_LOOP_COUNT; + d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no)); + while ((d64 != 0) && loop--) { + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no)); + + rte_delay_ms(1); + + d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CNTS(q_no)); + } + + d64 = 0ull; + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(q_no)); + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_PKT_CNT(q_no)); + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_IN_BYTE_CNT(q_no)); + + return 0; +} + +static int +sdp_vf_reset_oq(struct sdp_device *sdpvf, int q_no) +{ + uint64_t loop = SDP_VF_BUSY_LOOP_COUNT; + volatile uint64_t d64 = 0ull; + + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no)); + + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(q_no)); + + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(q_no)); + + d64 = 0xFFFFFFFF; + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no)); + d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no)); + + while ((d64 != 0) && loop--) { + otx2_write64(d64, sdpvf->hw_addr + + SDP_VF_R_OUT_SLIST_DBELL(q_no)); + + rte_delay_ms(1); + + d64 = otx2_read64(sdpvf->hw_addr + + SDP_VF_R_OUT_SLIST_DBELL(q_no)); + } + + loop = SDP_VF_BUSY_LOOP_COUNT; + d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no)); + while ((d64 != 0) && (loop--)) { + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no)); + + rte_delay_ms(1); + + d64 = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no)); + } + + d64 = 0ull; + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_INT_LEVELS(q_no)); + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_PKT_CNT(q_no)); + otx2_write64(d64, sdpvf->hw_addr + SDP_VF_R_OUT_BYTE_CNT(q_no)); + + return 0; +} + +static void +sdp_vf_setup_global_iq_reg(struct sdp_device *sdpvf, int q_no) +{ + volatile uint64_t reg_val = 0ull; + + /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for IQs + * IS_64B is by default enabled. 
+ */ + reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(q_no)); + + reg_val |= SDP_VF_R_IN_CTL_RDSIZE; + reg_val |= SDP_VF_R_IN_CTL_IS_64B; + reg_val |= SDP_VF_R_IN_CTL_ESR; + + otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(q_no)); + +} + +static void +sdp_vf_setup_global_oq_reg(struct sdp_device *sdpvf, int q_no) +{ + volatile uint64_t reg_val = 0ull; + + reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(q_no)); + + reg_val |= (SDP_VF_R_OUT_CTL_IMODE); + + reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_P); + reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_P); + reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_I); + reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_I); + reg_val &= ~(SDP_VF_R_OUT_CTL_ES_I); + reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_D); + reg_val &= ~(SDP_VF_R_OUT_CTL_NSR_D); + reg_val &= ~(SDP_VF_R_OUT_CTL_ES_D); + + /* INFO/DATA ptr swap is required */ + reg_val |= (SDP_VF_R_OUT_CTL_ES_P); + + otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(q_no)); + +} + +static int +sdp_vf_reset_input_queues(struct sdp_device *sdpvf) +{ + uint32_t q_no = 0; + + otx2_sdp_dbg("%s :", __func__); + + for (q_no = 0; q_no < sdpvf->sriov_info.rings_per_vf; q_no++) + sdp_vf_reset_iq(sdpvf, q_no); + + return 0; +} + +static int +sdp_vf_reset_output_queues(struct sdp_device *sdpvf) +{ + uint64_t q_no = 0ull; + + otx2_sdp_dbg(" %s :", __func__); + + for (q_no = 0; q_no < sdpvf->sriov_info.rings_per_vf; q_no++) + sdp_vf_reset_oq(sdpvf, q_no); + + return 0; +} + +static void +sdp_vf_setup_global_input_regs(struct sdp_device *sdpvf) +{ + uint64_t q_no = 0ull; + + sdp_vf_reset_input_queues(sdpvf); + + for (q_no = 0; q_no < (sdpvf->sriov_info.rings_per_vf); q_no++) + sdp_vf_setup_global_iq_reg(sdpvf, q_no); +} + +static void +sdp_vf_setup_global_output_regs(struct sdp_device *sdpvf) +{ + uint32_t q_no; + + sdp_vf_reset_output_queues(sdpvf); + + for (q_no = 0; q_no < (sdpvf->sriov_info.rings_per_vf); q_no++) + sdp_vf_setup_global_oq_reg(sdpvf, q_no); + +} + +static int +sdp_vf_setup_device_regs(struct sdp_device *sdpvf) +{ + sdp_vf_setup_global_input_regs(sdpvf); + sdp_vf_setup_global_output_regs(sdpvf); + + return 0; +} + +static void +sdp_vf_setup_iq_regs(struct sdp_device *sdpvf, uint32_t iq_no) +{ + struct sdp_instr_queue *iq = sdpvf->instr_queue[iq_no]; + volatile uint64_t reg_val = 0ull; + + reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(iq_no)); + + /* Wait till IDLE to set to 1, not supposed to configure BADDR + * as long as IDLE is 0 + */ + if (!(reg_val & SDP_VF_R_IN_CTL_IDLE)) { + do { + reg_val = otx2_read64(sdpvf->hw_addr + + SDP_VF_R_IN_CONTROL(iq_no)); + } while (!(reg_val & SDP_VF_R_IN_CTL_IDLE)); + } + + /* Write the start of the input queue's ring and its size */ + otx2_write64(iq->base_addr_dma, sdpvf->hw_addr + + SDP_VF_R_IN_INSTR_BADDR(iq_no)); + otx2_write64(iq->nb_desc, sdpvf->hw_addr + + SDP_VF_R_IN_INSTR_RSIZE(iq_no)); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = (uint8_t *) sdpvf->hw_addr + + SDP_VF_R_IN_INSTR_DBELL(iq_no); + iq->inst_cnt_reg = (uint8_t *) sdpvf->hw_addr + + SDP_VF_R_IN_CNTS(iq_no); + + otx2_sdp_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instrn counter(used in flush_iq calculation) */ + iq->reset_instr_cnt = rte_read32(iq->inst_cnt_reg); + + /* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR + * to raise + */ + reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no)); + reg_val = 0xffffffff; + 
+static void
+sdp_vf_setup_oq_regs(struct sdp_device *sdpvf, uint32_t oq_no)
+{
+	volatile uint64_t reg_val = 0ull;
+	uint64_t oq_ctl = 0ull;
+
+	struct sdp_droq *droq = sdpvf->droq[oq_no];
+
+	/* Wait until IDLE is set to 1; BADDR must not be configured
+	 * while IDLE is 0.
+	 */
+	reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
+
+	while (!(reg_val & SDP_VF_R_OUT_CTL_IDLE)) {
+		reg_val = otx2_read64(sdpvf->hw_addr +
+				      SDP_VF_R_OUT_CONTROL(oq_no));
+	}
+
+	otx2_write64(droq->desc_ring_dma, sdpvf->hw_addr +
+		     SDP_VF_R_OUT_SLIST_BADDR(oq_no));
+	otx2_write64(droq->nb_desc, sdpvf->hw_addr +
+		     SDP_VF_R_OUT_SLIST_RSIZE(oq_no));
+
+	oq_ctl = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
+
+	/* Clear the ISIZE and BSIZE fields (bits 22-0) */
+	oq_ctl &= ~(0x7fffffull);
+
+	/* Populate BSIZE (bits 15-0) */
+	oq_ctl |= (droq->buffer_size & 0xffff);
+
+	/* Populate ISIZE (bits 22-16) */
+	oq_ctl |= ((SDP_RH_SIZE << 16) & 0x7fffff);
+	otx2_write64(oq_ctl, sdpvf->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
+
+	/* Mapped addresses of the pkts_sent and pkts_credit regs */
+	droq->pkts_sent_reg = (uint8_t *)sdpvf->hw_addr +
+			      SDP_VF_R_OUT_CNTS(oq_no);
+	droq->pkts_credit_reg = (uint8_t *)sdpvf->hw_addr +
+				SDP_VF_R_OUT_SLIST_DBELL(oq_no);
+
+	/* Clear PKT_CNT register */
+	rte_write64(0xFFFFFFFFF, (uint8_t *)sdpvf->hw_addr +
+		    SDP_VF_R_OUT_PKT_CNT(oq_no));
+
+	/* Clear the OQ doorbell */
+	rte_write32(0xFFFFFFFF, droq->pkts_credit_reg);
+	while (rte_read32(droq->pkts_credit_reg) != 0) {
+		rte_write32(0xFFFFFFFF, droq->pkts_credit_reg);
+		rte_delay_ms(1);
+	}
+	otx2_sdp_dbg("SDP_R[%d]_credit:%x", oq_no,
+		     rte_read32(droq->pkts_credit_reg));
+
+	/* Clear the OQ_OUT_CNTS doorbell */
+	reg_val = rte_read32(droq->pkts_sent_reg);
+	rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
+
+	otx2_sdp_dbg("SDP_R[%d]_sent: %x", oq_no,
+		     rte_read32(droq->pkts_sent_reg));
+
+	while (rte_read32(droq->pkts_sent_reg) != 0) {
+		reg_val = rte_read32(droq->pkts_sent_reg);
+		rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
+		rte_delay_ms(1);
+	}
+}
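[Editor's note] The ISIZE/BSIZE packing above is the fiddliest part of OQ setup: both fields live in the low 23 bits of OUT_CONTROL and must be cleared together before being repopulated. Below is a minimal standalone check of the same arithmetic; the 2048-byte buffer size and 8-byte info size are illustrative values, not the driver's actual configuration.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t oq_ctl = 0xabcd000000700001ull; /* upper bits must survive */
		uint64_t buffer_size = 2048;  /* BSIZE, bits 15:0 (illustrative) */
		uint64_t info_size = 8;       /* ISIZE, bits 22:16 (illustrative) */

		oq_ctl &= ~0x7fffffull;                   /* clear ISIZE+BSIZE (22:0) */
		oq_ctl |= (buffer_size & 0xffff);         /* BSIZE into bits 15:0 */
		oq_ctl |= ((info_size << 16) & 0x7fffff); /* ISIZE into bits 22:16 */

		/* Expect 0xabcd000000080800: (8 << 16) | 2048, upper bits intact */
		printf("OUT_CONTROL = 0x%016" PRIx64 "\n", oq_ctl);
		return 0;
	}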
+static void
+sdp_vf_enable_iq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+	volatile uint64_t reg_val = 0ull;
+	uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+
+	/* Reset the doorbell during IQ enabling as well, to handle an
+	 * abrupt guest reboot: an IQ reset does not clear the doorbell.
+	 */
+	otx2_write64(0xFFFFFFFF, sdpvf->hw_addr +
+		     SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+	while (((otx2_read64(sdpvf->hw_addr +
+		 SDP_VF_R_IN_INSTR_DBELL(q_no))) != 0ull) && loop--) {
+		rte_delay_ms(1);
+	}
+
+	reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+	reg_val |= 0x1ull;
+
+	otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+
+	otx2_info("IQ[%d] enable done", q_no);
+}
+
+static void
+sdp_vf_enable_oq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+	volatile uint64_t reg_val = 0ull;
+
+	reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+	reg_val |= 0x1ull;
+	otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+	otx2_info("OQ[%d] enable done", q_no);
+}
+
+static void
+sdp_vf_enable_io_queues(struct sdp_device *sdpvf)
+{
+	uint32_t q_no = 0;
+
+	for (q_no = 0; q_no < sdpvf->num_iqs; q_no++)
+		sdp_vf_enable_iq(sdpvf, q_no);
+
+	for (q_no = 0; q_no < sdpvf->num_oqs; q_no++)
+		sdp_vf_enable_oq(sdpvf, q_no);
+}
+
+static void
+sdp_vf_disable_iq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+	volatile uint64_t reg_val = 0ull;
+
+	/* Clear the enable bit for this input queue. */
+	reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+	reg_val &= ~0x1ull;
+
+	otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+}
+
+static void
+sdp_vf_disable_oq(struct sdp_device *sdpvf, uint32_t q_no)
+{
+	volatile uint64_t reg_val = 0ull;
+
+	reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+	reg_val &= ~0x1ull;
+
+	otx2_write64(reg_val, sdpvf->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+}
+
+static void
+sdp_vf_disable_io_queues(struct sdp_device *sdpvf)
+{
+	uint32_t q_no = 0;
+
+	/* Disable Input Queues. */
+	for (q_no = 0; q_no < sdpvf->num_iqs; q_no++)
+		sdp_vf_disable_iq(sdpvf, q_no);
+
+	/* Disable Output Queues. */
+	for (q_no = 0; q_no < sdpvf->num_oqs; q_no++)
+		sdp_vf_disable_oq(sdpvf, q_no);
+}
+
+static uint32_t
+sdp_vf_update_read_index(struct sdp_instr_queue *iq)
+{
+	uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
+
+	/* The instruction count register is a free-running 32-bit counter
+	 * that can roll over; its value at init time was recorded in
+	 * reset_instr_cnt.
+	 */
+	if (iq->reset_instr_cnt < new_idx)
+		new_idx -= iq->reset_instr_cnt;
+	else
+		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+	/* Modulo with the IQ size gives the ring position. */
+	new_idx %= iq->nb_desc;
+
+	return new_idx;
+}
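[Editor's note] The rollover handling in sdp_vf_update_read_index() is worth a second look: because new_idx is a uint32_t, both branches compute (new_idx - reset_instr_cnt) mod 2^32; the explicit wrap term in the else branch spells out the rollover case. A small self-contained demonstration, with made-up counter values, mirroring the same arithmetic:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the driver's rollover arithmetic, illustrative values. */
	static uint32_t
	wrap_delta(uint32_t reset_cnt, uint32_t new_idx, uint32_t nb_desc)
	{
		if (reset_cnt < new_idx)
			new_idx -= reset_cnt;
		else
			new_idx += (0xffffffff - reset_cnt) + 1;

		return new_idx % nb_desc;
	}

	int main(void)
	{
		/* No rollover: counter moved from 100 to 1124, 1024-deep ring */
		printf("%" PRIu32 "\n", wrap_delta(100, 1124, 1024));      /* 0 */
		/* Rollover: counter wrapped from 0xfffffff0 past zero to 16 */
		printf("%" PRIu32 "\n", wrap_delta(0xfffffff0, 16, 1024)); /* 32 */
		return 0;
	}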
+int
+sdp_vf_setup_device(struct sdp_device *sdpvf)
+{
+	uint64_t reg_val = 0ull;
+
+	/* If the application does not provide its own conf, use the
+	 * driver's default conf.
+	 */
+	if (sdpvf->conf == NULL) {
+		sdpvf->conf = sdp_get_defconf(sdpvf);
+		if (sdpvf->conf == NULL) {
+			otx2_err("SDP VF default config not found");
+			return -ENOMEM;
+		}
+		otx2_info("Default config is used");
+	}
+
+	/* Get the IOQ (RPVF) count */
+	reg_val = otx2_read64(sdpvf->hw_addr + SDP_VF_R_IN_CONTROL(0));
+
+	sdpvf->sriov_info.rings_per_vf = ((reg_val >> SDP_VF_R_IN_CTL_RPVF_POS)
+					  & SDP_VF_R_IN_CTL_RPVF_MASK);
+
+	otx2_info("SDP RPVF: %d", sdpvf->sriov_info.rings_per_vf);
+
+	sdpvf->fn_list.setup_iq_regs = sdp_vf_setup_iq_regs;
+	sdpvf->fn_list.setup_oq_regs = sdp_vf_setup_oq_regs;
+
+	sdpvf->fn_list.setup_device_regs = sdp_vf_setup_device_regs;
+	sdpvf->fn_list.update_iq_read_idx = sdp_vf_update_read_index;
+
+	sdpvf->fn_list.enable_io_queues = sdp_vf_enable_io_queues;
+	sdpvf->fn_list.disable_io_queues = sdp_vf_disable_io_queues;
+
+	sdpvf->fn_list.enable_iq = sdp_vf_enable_iq;
+	sdpvf->fn_list.disable_iq = sdp_vf_disable_iq;
+
+	sdpvf->fn_list.enable_oq = sdp_vf_enable_oq;
+	sdpvf->fn_list.disable_oq = sdp_vf_disable_oq;
+
+	return 0;
+}
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h
new file mode 100644
index 000000000..996f2e51e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/otx2_ep_vf.h
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+#ifndef _OTX2_EP_VF_H_
+#define _OTX2_EP_VF_H_
+
+int
+sdp_vf_setup_device(struct sdp_device *sdpvf);
+
+#endif /* _OTX2_EP_VF_H_ */
diff --git a/src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map b/src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map
new file mode 100644
index 000000000..acdaf587d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map
+DPDK_21 {
+
+	local: *;
+};
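[Editor's note] Tying the pieces together: callers never invoke the static helpers above directly; they call sdp_vf_setup_device() once and then drive the device through the fn_list hooks it populates. The fragment below is a hypothetical bring-up sequence under that assumption, with error handling trimmed; the real probe logic lives in otx2_ep_rawdev.c.

	/* Hypothetical bring-up: populate fn_list, program the global and
	 * per-queue registers, then enable all configured queue pairs.
	 */
	static int
	sdp_bringup_sketch(struct sdp_device *sdpvf)
	{
		uint32_t q;

		if (sdp_vf_setup_device(sdpvf))
			return -1;

		sdpvf->fn_list.setup_device_regs(sdpvf);

		for (q = 0; q < sdpvf->num_iqs; q++)
			sdpvf->fn_list.setup_iq_regs(sdpvf, q);
		for (q = 0; q < sdpvf->num_oqs; q++)
			sdpvf->fn_list.setup_oq_regs(sdpvf, q);

		sdpvf->fn_list.enable_io_queues(sdpvf);
		return 0;
	}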