/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright 2016 NXP
*
*/
#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H
#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>
#include <dpaa2_hw_pvt.h>
#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>
#define DPAA2_MIN_RX_BUF_SIZE 512
#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/
#define MAX_TCS DPNI_MAX_TC
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC 0
/* Threshold for a Tx queue to *Enter* Congestion state.
*/
#define CONG_ENTER_TX_THRESHOLD 512
/* Threshold for a queue to *Exit* Congestion state.
*/
#define CONG_EXIT_TX_THRESHOLD 480
#define CONG_RETRY_COUNT 18000
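/*
 * Note on the thresholds above (illustrative summary, see the Tx setup and
 * burst code for the authoritative behaviour): a Tx queue enters the
 * congested state once roughly CONG_ENTER_TX_THRESHOLD frames are pending
 * and exits it again when the backlog drops below CONG_EXIT_TX_THRESHOLD,
 * giving a small hysteresis window. CONG_RETRY_COUNT bounds how many times
 * the Tx burst path re-checks the congestion state before giving up on the
 * rest of the burst.
 */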
/* RX queue tail drop threshold,
 * currently set to 64 KB of queued data
 */
#define CONG_THRESHOLD_RX_Q (64 * 1024)
#define CONG_RX_OAL 128
/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256
/* Enable TX Congestion control support,
 * default is disabled
 */
#define DPAA2_TX_CGR_OFF 0x01
/* Disable RX tail drop, default is enabled */
#define DPAA2_RX_TAILDROP_OFF 0x04
#define DPAA2_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP)
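/*
 * Illustrative usage only (not part of this header): an application would
 * normally request these hash types through the generic ethdev RSS
 * configuration, e.g.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *	conf.rx_adv_conf.rss_conf.rss_hf = DPAA2_RSS_OFFLOAD_ALL;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * The requested rss_hf bits are then turned into a DPNI key-generation
 * profile by dpaa2_distset_to_dpkg_profile_cfg(), declared further below.
 */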
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060
#define DPAA2_PKT_TYPE_IPV4 0x0000
#define DPAA2_PKT_TYPE_IPV6 0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
	(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
	(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
	(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
	(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
	(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
	(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP \
	(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP \
	(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
	(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
	(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1 0x0160
#define DPAA2_PKT_TYPE_VLAN_2 0x0260
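/*
 * The FRC values above are compared against the parse summary carried in
 * the Rx frame descriptor on LX2 platforms; the fast-path Rx code (in
 * dpaa2_rxtx.c) uses them to derive mbuf->packet_type. A simplified,
 * illustrative sketch of that translation:
 *
 *	switch (frc) {
 *	case DPAA2_PKT_TYPE_IPV4_UDP:
 *		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 *			   RTE_PTYPE_L4_UDP;
 *		break;
 *	case DPAA2_PKT_TYPE_IPV6_TCP:
 *		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
 *			   RTE_PTYPE_L4_TCP;
 *		break;
 *	... and so on for the remaining values ...
 *	}
 */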
/* Enable timestamp in mbuf */
extern enum pmd_dpaa2_ts dpaa2_enable_ts;
#define DPAA2_QOS_TABLE_RECONFIGURE 1
#define DPAA2_FS_TABLE_RECONFIGURE 2
/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
extern enum rte_filter_type dpaa2_filter_type;
struct dpaa2_dev_priv {
	void *hw;
	int32_t hw_id;
	int32_t qdid;
	uint16_t token;
	uint8_t nb_tx_queues;
	uint8_t nb_rx_queues;
	uint32_t options;
	void *rx_vq[MAX_RX_QUEUES];
	void *tx_vq[MAX_TX_QUEUES];
	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	uint8_t max_mac_filters;
	uint8_t max_vlan_filters;
	uint8_t num_rx_tc;
	uint8_t flags; /* DPAA2 config flags */
	uint8_t en_ordered;
	uint8_t en_loose_ordered;

	struct pattern_s {
		uint8_t item_count;
		uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
	} pattern[MAX_TCS + 1];

	struct extract_s {
		struct dpkg_profile_cfg qos_key_cfg;
		struct dpkg_profile_cfg fs_key_cfg[MAX_TCS];
		uint64_t qos_extract_param;
		uint64_t fs_extract_param[MAX_TCS];
	} extract;

	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
};
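/*
 * The structure above is what the ethdev layer stores as the device private
 * data, so driver code typically begins with something like the following
 * (illustrative only):
 *
 *	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
 *	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
 */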
int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
	struct dpkg_profile_cfg *kg_cfg);

int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
	uint64_t req_dist_set);

int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
	uint8_t tc_index);

int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
	int eth_rx_queue_id,
	uint16_t dpcon_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
	int eth_rx_queue_id);
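/*
 * Rx burst handlers: dpaa2_dev_prefetch_rx() is the regular receive path,
 * while dpaa2_dev_loopback_rx() is used when the port is configured in
 * loopback mode, where received frames are transmitted straight back out.
 */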
uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
	uint16_t nb_pkts);

uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
	uint16_t nb_pkts);

void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
	const struct qbman_fd *fd,
	const struct qbman_result *dq,
	struct dpaa2_queue *rxq,
	struct rte_event *ev);

void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
	const struct qbman_fd *fd,
	const struct qbman_result *dq,
	struct dpaa2_queue *rxq,
	struct rte_event *ev);

void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
	const struct qbman_fd *fd,
	const struct qbman_result *dq,
	struct dpaa2_queue *rxq,
	struct rte_event *ev);
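/*
 * The *_event() callbacks above are installed as the Rx queue callback by
 * dpaa2_eth_eventq_attach() and invoked from the DPAA2 event device dequeue
 * path; one variant exists per scheduling type (parallel, atomic, ordered).
 */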
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
	uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
#endif /* _DPAA2_ETHDEV_H */