path: root/src/spdk/dpdk/drivers/net/hinic/hinic_pmd_rx.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#ifndef _HINIC_PMD_RX_H_
#define _HINIC_PMD_RX_H_

#define HINIC_DEFAULT_RX_FREE_THRESH	32

#define HINIC_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX)
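
/*
 * Illustrative sketch (not part of the driver API): an application could
 * request every hash type covered here by passing this mask as rss_hf when
 * configuring the port. The snippet assumes the legacy ETH_RSS_ and
 * ETH_MQ_RX_RSS names used elsewhere in this header:
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf = {
 *			.rss_conf = {
 *				.rss_key = NULL,
 *				.rss_hf  = HINIC_RSS_OFFLOAD_ALL,
 *			},
 *		},
 *	};
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */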

enum rq_completion_fmt {
	RQ_COMPLETE_SGE = 1
};

struct hinic_rq_ctrl {
	u32	ctrl_fmt;
};

struct hinic_rq_cqe {
	u32 status;
	u32 vlan_len;
	u32 offload_type;
	u32 rss_hash;

	u32 rsvd[4];
} __rte_cache_aligned;

struct hinic_rq_cqe_sect {
	struct hinic_sge	sge;
	u32			rsvd;
};

struct hinic_rq_bufdesc {
	u32	addr_high;
	u32	addr_low;
};

struct hinic_rq_wqe {
	struct hinic_rq_ctrl		ctrl;
	u32				rsvd;
	struct hinic_rq_cqe_sect	cqe_sect;
	struct hinic_rq_bufdesc		buf_desc;
};
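
/*
 * Illustrative sketch: the buffer descriptor carries the 64-bit mbuf DMA
 * address split into two 32-bit halves. A refill path could fill it roughly
 * as below (hypothetical snippet; whether a byte-order conversion such as
 * cpu_to_be32() is needed depends on the device and is an assumption here):
 *
 *	dma_addr_t dma_addr = rte_mbuf_data_iova_default(mbuf);
 *	rq_wqe->buf_desc.addr_high = (u32)(dma_addr >> 32);
 *	rq_wqe->buf_desc.addr_low  = (u32)(dma_addr & 0xffffffff);
 */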

struct hinic_rxq_stats {
	u64 packets;
	u64 bytes;
	u64 rx_nombuf;
	u64 errors;
	u64 rx_discards;
	u64 burst_pkts;
};

/* Attention: do not add any members to hinic_rx_info,
 * as rxq bulk rearm mode writes mbufs directly into rx_info.
 */
struct hinic_rx_info {
	struct rte_mbuf *mbuf;
};
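
/*
 * Illustrative sketch of why hinic_rx_info must stay a single pointer: in
 * bulk rearm mode the ring of rx_info entries can be treated as a plain
 * array of mbuf pointers, so a bulk allocation may write into it directly
 * (hypothetical snippet; the actual rearm logic lives in hinic_pmd_rx.c):
 *
 *	struct hinic_rx_info *info = &rxq->rx_info[prod_idx];
 *
 *	if (rte_pktmbuf_alloc_bulk(rxq->mb_pool,
 *				   (struct rte_mbuf **)info, nb_rearm) != 0)
 *		return;
 *
 * Adding another member would change the element stride and break this cast.
 */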

struct hinic_rxq {
	struct hinic_wq *wq;
	volatile u16 *pi_virt_addr;

	u16 port_id;
	u16 q_id;
	u16 q_depth;
	u16 buf_len;

	u16 rx_free_thresh;
	u16 rxinfo_align_end;

	u32 socket_id;

	unsigned long status;
	struct hinic_rxq_stats rxq_stats;

	struct hinic_nic_dev *nic_dev;

	struct hinic_rx_info	*rx_info;
	volatile struct hinic_rq_cqe *rx_cqe;

	dma_addr_t cqe_start_paddr;
	void *cqe_start_vaddr;
	struct rte_mempool *mb_pool;
};

int hinic_setup_rx_resources(struct hinic_rxq *rxq);

void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev);

void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev);

void hinic_free_rx_resources(struct hinic_rxq *rxq);

u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);
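
/*
 * Illustrative note: this is the burst receive handler; during device setup
 * the driver is expected to install it as the ethdev Rx burst callback
 * (the exact call site is in the C sources, shown here only as an assumption):
 *
 *	eth_dev->rx_pkt_burst = hinic_recv_pkts;
 *
 * Applications then receive packets through the generic rte_eth_rx_burst()
 * path.
 */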

void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq);

void hinic_rx_alloc_pkts(struct hinic_rxq *rxq);

void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);

void hinic_rxq_stats_reset(struct hinic_rxq *rxq);

int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on);

int hinic_rx_configure(struct rte_eth_dev *dev);

void hinic_rx_remove_configure(struct rte_eth_dev *dev);

void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev);

int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
			u16 rq_depth, unsigned int socket_id);

void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id);

#endif /* _HINIC_PMD_RX_H_ */