1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
|
/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
* Copyright(c) 2018 Intel Corporation.
*/
#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H
#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>
#define XDP_UMEM_SG_FLAG (1 << 1)
struct net_device;
struct xsk_queue;
struct xdp_buff;
/* A registered chunk of user memory (umem) backing one or more AF_XDP
 * sockets. Reference-counted via @users; torn down asynchronously
 * through @work.
 */
struct xdp_umem {
	void *addrs;			/* kernel-side mapping of the region */
	u64 size;			/* total size of the region in bytes */
	u32 headroom;			/* headroom configured at registration */
	u32 chunk_size;			/* size of one frame/chunk in bytes */
	u32 chunks;			/* number of chunks in the region */
	u32 npgs;			/* number of pages pinned in @pgs */
	struct user_struct *user;	/* owning user, for memory accounting */
	refcount_t users;		/* holders of this umem (sockets/pools) */
	u8 flags;			/* registration flags, e.g. XDP_UMEM_SG_FLAG */
	u8 tx_metadata_len;		/* bytes of TX metadata preceding each frame */
	bool zc;			/* zero-copy mode in use */
	struct page **pgs;		/* pinned user pages */
	int id;				/* umem identifier */
	struct list_head xsk_dma_list;	/* per-device DMA mappings of this umem */
	struct work_struct work;	/* deferred release work */
};
/* BPF map holding XDP sockets as redirect targets (XSKMAP).
 * Entries are dereferenced under RCU on the data path.
 */
struct xsk_map {
	struct bpf_map map;	/* must be first: generic BPF map header */
	spinlock_t lock; /* Synchronize map updates */
	atomic_t count;	/* presumably the number of installed sockets — confirm against map update path */
	struct xdp_sock __rcu *xsk_map[];	/* flexible array of socket slots */
};
/* An AF_XDP socket. RX- and TX-side hot fields are split onto separate
 * cachelines via ____cacheline_aligned_in_smp.
 */
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;	/* RX descriptor ring */
	struct net_device *dev;		/* device this socket is bound to */
	struct xdp_umem *umem;		/* backing user memory region */
	struct list_head flush_node;	/* linkage on the per-CPU flush list */
	struct xsk_buff_pool *pool;	/* buffer pool shared with the driver */
	u16 queue_id;			/* device queue this socket is bound to */
	bool zc;			/* zero-copy mode active */
	bool sg;			/* multi-buffer (scatter-gather) enabled */
	enum {
		XSK_READY = 0,		/* created, not yet bound */
		XSK_BOUND,		/* bound to a device/queue */
		XSK_UNBOUND,		/* unbound, being torn down */
	} state;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;	/* TX descriptor ring */
	struct list_head tx_list;	/* linkage on the pool's TX socket list */
	/* record the number of tx descriptors sent by this xsk and
	 * when it exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs
	 * to be given to other xsks for sending tx descriptors, thereby
	 * preventing other XSKs from being starved.
	 */
	u32 tx_budget_spent;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	/* Statistics */
	u64 rx_dropped;		/* frames dropped on RX */
	u64 rx_queue_full;	/* frames dropped because the RX ring was full */
	/* When __xsk_generic_xmit() must return before it sees the EOP descriptor for the current
	 * packet, the partially built skb is saved here so that packet building can resume in next
	 * call of __xsk_generic_xmit().
	 */
	struct sk_buff *skb;
	struct list_head map_list;	/* XSKMAPs this socket is a member of */
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
/*
* AF_XDP TX metadata hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
* void (*tmo_request_timestamp)(void *priv)
* Called when AF_XDP frame requested egress timestamp.
*
* u64 (*tmo_fill_timestamp)(void *priv)
* Called when AF_XDP frame, that had requested egress timestamp,
* received a completion. The hook needs to return the actual HW timestamp.
*
* void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv)
* Called when AF_XDP frame requested HW checksum offload. csum_start
* indicates position where checksumming should start.
* csum_offset indicates position where checksum should be stored.
*
*/
/* Driver-provided TX metadata hooks; see the block comment above for the
 * contract of each callback. Unset (NULL) hooks are simply skipped.
 */
struct xsk_tx_metadata_ops {
	void (*tmo_request_timestamp)(void *priv);	/* egress timestamp requested */
	u64 (*tmo_fill_timestamp)(void *priv);		/* return HW timestamp at completion */
	void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);	/* HW csum offload requested */
};
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
/**
 * xsk_tx_metadata_to_compl - Capture the metadata state needed to finish
 * TX completion later.
 * @meta: pointer to AF_XDP metadata area
 * @compl: pointer to output struct xsk_tx_metadata_compl
 *
 * Called by the networking device while preparing an AF_XDP egress
 * packet. The driver must keep @compl around and hand it to
 * xsk_tx_metadata_complete() once the TX completion arrives.
 */
static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
					    struct xsk_tx_metadata_compl *compl)
{
	if (!meta)
		return;

	/* Record where the HW timestamp should be written back, or NULL
	 * when no timestamp was requested for this frame.
	 */
	compl->tx_timestamp = (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP) ?
			      &meta->completion.tx_timestamp : NULL;
}
/**
 * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission
 * and invoke the matching xsk_tx_metadata_ops hooks.
 * @meta: pointer to AF_XDP metadata area
 * @ops: pointer to struct xsk_tx_metadata_ops
 * @priv: pointer to driver-private area
 *
 * Called by the networking device while preparing an AF_XDP egress
 * packet. Each request flag fires its hook only when the driver has
 * provided one.
 */
static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
					   const struct xsk_tx_metadata_ops *ops,
					   void *priv)
{
	if (!meta)
		return;

	if ((meta->flags & XDP_TXMD_FLAGS_TIMESTAMP) && ops->tmo_request_timestamp)
		ops->tmo_request_timestamp(priv);

	if ((meta->flags & XDP_TXMD_FLAGS_CHECKSUM) && ops->tmo_request_checksum)
		ops->tmo_request_checksum(meta->request.csum_start,
					  meta->request.csum_offset, priv);
}
/**
 * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion
 * and call the appropriate xsk_tx_metadata_ops operation.
 * @compl: completion metadata produced by xsk_tx_metadata_to_compl()
 * @ops: pointer to struct xsk_tx_metadata_ops
 * @priv: pointer to driver-private area
 *
 * Called by the networking device upon AF_XDP egress completion.
 */
static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
					    const struct xsk_tx_metadata_ops *ops,
					    void *priv)
{
	/* Nothing to write back unless a timestamp was requested at submission. */
	if (!compl || !compl->tx_timestamp)
		return;

	*compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
}
#else
/* !CONFIG_XDP_SOCKETS stub: AF_XDP is compiled out, so generic receive
 * always fails.
 * NOTE(review): returns -ENOTSUPP while the __xsk_map_redirect stub below
 * returns -EOPNOTSUPP; -ENOTSUPP is kernel-internal — confirm the
 * asymmetry is intentional before changing either.
 */
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}
/* !CONFIG_XDP_SOCKETS stub: redirecting into an XSKMAP is unsupported. */
static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}
/* !CONFIG_XDP_SOCKETS stub: nothing to flush. */
static inline void __xsk_map_flush(void)
{
}
/* !CONFIG_XDP_SOCKETS stub: TX metadata support compiled out; no-op. */
static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
					    struct xsk_tx_metadata_compl *compl)
{
}
/* !CONFIG_XDP_SOCKETS stub: TX metadata support compiled out; no-op.
 * @meta is const-qualified to match the CONFIG_XDP_SOCKETS prototype, so
 * callers compile identically under both configurations.
 */
static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
					   const struct xsk_tx_metadata_ops *ops,
					   void *priv)
{
}
/* !CONFIG_XDP_SOCKETS stub: TX metadata support compiled out; no-op. */
static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
					    const struct xsk_tx_metadata_ops *ops,
					    void *priv)
{
}
#endif /* CONFIG_XDP_SOCKETS */
#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
bool xsk_map_check_flush(void);
#else
/* Stub when XDP sockets or CONFIG_DEBUG_NET are disabled: report that no
 * flush was pending.
 */
static inline bool xsk_map_check_flush(void)
{
	return false;
}
#endif
#endif /* _LINUX_XDP_SOCK_H */
|