/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* if_alg: User-space algorithm interface
*
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
*/
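/*
* Illustrative user space sketch (not part of this header): the AF_ALG
* interface declared below is typically consumed from user space via the
* socket API.  The flow shown here follows the documented sequence for an
* "skcipher" algorithm; the key material and error handling are placeholders.
*
*	int tfmfd, opfd;
*	struct sockaddr_alg sa = {
*		.salg_family	= AF_ALG,
*		.salg_type	= "skcipher",
*		.salg_name	= "cbc(aes)",
*	};
*
*	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
*	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
*	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
*	opfd = accept(tfmfd, NULL, 0);
*
* Plaintext is then passed via sendmsg() on opfd with ALG_SET_OP and
* ALG_SET_IV control messages, and the ciphertext is collected with read().
*/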
#ifndef _CRYPTO_IF_ALG_H
#define _CRYPTO_IF_ALG_H
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/if_alg.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <crypto/aead.h>
#include <crypto/skcipher.h>
#define ALG_MAX_PAGES 16
struct alg_sock {
/* struct sock must be the first member of struct alg_sock */
struct sock sk;
struct sock *parent;
atomic_t refcnt;
atomic_t nokey_refcnt;
const struct af_alg_type *type;
void *private;
};
struct af_alg_control {
struct af_alg_iv *iv;
int op;
unsigned int aead_assoclen;
};
struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*setentropy)(void *private, sockptr_t entropy, unsigned int len);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
struct proto_ops *ops;
struct proto_ops *ops_nokey;
struct module *owner;
char name[14];
};
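/*
* Illustrative sketch only: an algorithm type module, in the spirit of
* algif_skcipher or algif_aead, fills in struct af_alg_type and registers it
* with af_alg_register_type() from its module init routine.  All "example_*"
* names are hypothetical placeholders.
*
*	static struct proto_ops example_proto_ops;
*
*	static const struct af_alg_type algif_type_example = {
*		.bind		= example_bind,
*		.release	= example_release,
*		.setkey		= example_setkey,
*		.accept		= example_accept,
*		.ops		= &example_proto_ops,
*		.name		= "example",
*		.owner		= THIS_MODULE,
*	};
*
*	static int __init example_init(void)
*	{
*		return af_alg_register_type(&algif_type_example);
*	}
*/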
struct af_alg_sgl {
struct sg_table sgt;
struct scatterlist sgl[ALG_MAX_PAGES + 1];
bool need_unpin;
};
/* TX SGL entry */
struct af_alg_tsgl {
struct list_head list;
unsigned int cur; /* Last processed SG entry */
struct scatterlist sg[]; /* Array of SGs forming the SGL */
};
#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \
sizeof(struct scatterlist) - 1)
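/*
* Sizing note (illustrative): assuming 4 KiB pages, a 24-byte struct
* af_alg_tsgl header and a 32-byte struct scatterlist on a typical 64-bit
* build, MAX_SGL_ENTS works out to (4096 - 24) / 32 - 1 = 126 usable entries,
* leaving one trailing entry for chaining to the next block so that a whole
* TX SGL block fits within a single page.  Exact sizes depend on the
* configuration.
*/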
/* RX SGL entry */
struct af_alg_rsgl {
struct af_alg_sgl sgl;
struct list_head list;
size_t sg_num_bytes; /* Bytes of data in that SGL */
};
/**
* struct af_alg_async_req - definition of crypto request
* @iocb: IOCB for AIO operations
* @sk: Socket the request is associated with
* @first_rsgl: First RX SG
* @last_rsgl: Pointer to last RX SG
* @rsgl_list: Track RX SGs
* @tsgl: Private, per request TX SGL of buffers to process
* @tsgl_entries: Number of entries in the private TX SGL
* @outlen: Number of output bytes generated by crypto op
* @areqlen: Length of this data structure
* @cra_u: Cipher request
*/
struct af_alg_async_req {
struct kiocb *iocb;
struct sock *sk;
struct af_alg_rsgl first_rsgl;
struct af_alg_rsgl *last_rsgl;
struct list_head rsgl_list;
struct scatterlist *tsgl;
unsigned int tsgl_entries;
unsigned int outlen;
unsigned int areqlen;
union {
struct aead_request aead_req;
struct skcipher_request skcipher_req;
} cra_u;
/* req ctx trails this struct */
};
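/*
* Illustrative sketch only: because the request context trails this
* structure, a type implementation, in the spirit of algif_skcipher,
* typically sizes the allocation as the structure itself plus the
* transform's request size.  "tfm" is a hypothetical placeholder for the
* bound transform.
*
*	struct af_alg_async_req *areq;
*
*	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
*				     crypto_skcipher_reqsize(tfm));
*	if (IS_ERR(areq))
*		return PTR_ERR(areq);
*/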
/**
* struct af_alg_ctx - definition of the crypto context
*
* The crypto context tracks the input data during the lifetime of an AF_ALG
* socket.
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
* @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @wait: Wait queue to await the completion of a synchronous cipher operation
* @used: TX bytes sent to the kernel. This variable is used to
* ensure that user space cannot cause the kernel
* to allocate too much memory in a sendmsg operation.
* @rcvused: Total RX bytes to be filled by kernel. This variable
* is used to ensure user space cannot cause the kernel
* to allocate too much memory in a recvmsg operation.
* @more: More data to be expected from user space?
* @merge: Shall new data from user space be merged into existing
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
* @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
void *state;
size_t aead_assoclen;
struct crypto_wait wait;
size_t used;
atomic_t rcvused;
bool more;
bool merge;
bool enc;
bool init;
unsigned int len;
unsigned int inflight;
};
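/*
* Illustrative sketch only, loosely following the algif_* implementations:
* a type implementation allocates the context on accept(), records the
* allocation size in @len and attaches the context to the socket.  Names
* and error handling are placeholders.
*
*	unsigned int len = sizeof(struct af_alg_ctx);
*	struct af_alg_ctx *ctx;
*
*	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
*	if (!ctx)
*		return -ENOMEM;
*	memset(ctx, 0, len);
*	ctx->len = len;
*	INIT_LIST_HEAD(&ctx->tsgl_list);
*	crypto_init_wait(&ctx->wait);
*	alg_sk(sk)->private = ctx;
*/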
int af_alg_register_type(const struct af_alg_type *type);
int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock,
struct proto_accept_arg *arg);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)
{
return (struct alg_sock *)sk;
}
/**
* af_alg_sndbuf - size of the buffer available for sending data to the kernel
* @sk: socket of connection to user space
*
* Return: number of bytes still available
*/
static inline int af_alg_sndbuf(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
ctx->used, 0);
}
/**
* af_alg_writable - check whether the send buffer can still be written to
* @sk: socket of connection to user space
*
* Return: true if writable, false otherwise
*/
static inline bool af_alg_writable(struct sock *sk)
{
return PAGE_SIZE <= af_alg_sndbuf(sk);
}
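/*
* Worked example (illustrative): assuming PAGE_SIZE is 4096 and sk_sndbuf is
* the common default of 212992 bytes, "sk_sndbuf & PAGE_MASK" rounds the
* limit down to a page boundary (212992 here).  With ctx->used == 210000,
* af_alg_sndbuf() returns 2992 and af_alg_writable() reports false, because
* less than one full page of send buffer remains.
*/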
/**
* af_alg_rcvbuf - size of the RX buffer still available for kernel-filled data
* @sk: socket of connection to user space
*
* Return: number of bytes still available
*/
static inline int af_alg_rcvbuf(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
atomic_read(&ctx->rcvused), 0);
}
/**
* af_alg_readable - check whether the kernel may still write into the RX buffer
* @sk: socket of connection to user space
*
* Return: true if the RX buffer can still accept data, false otherwise
*/
static inline bool af_alg_readable(struct sock *sk)
{
return PAGE_SIZE <= af_alg_rcvbuf(sk);
}
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset);
void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
struct af_alg_async_req *areq, size_t maxsize,
size_t *outlen);
#endif /* _CRYPTO_IF_ALG_H */