1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
|
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2018, Microsoft Corporation.
* All Rights Reserved.
*/
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <rte_eal.h>
#include <rte_tailq.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include "private.h"
/*
 * Atomically OR @mask into the 32-bit word at @addr.
 * Uses the GCC __sync builtin so the read-modify-write is safe
 * against concurrent writers (host and other lcores).
 */
static inline void
vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
{
	__sync_or_and_fetch(addr, mask);
}
/*
 * Mark channel @relid as having pending work by setting its bit
 * in the shared interrupt page (one bit per relid, 32 per word).
 */
static inline void
vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid)
{
	uint32_t *slot = dev->int_page + relid / 32;
	uint32_t bit = 1u << (relid % 32);

	vmbus_sync_set_bit(slot, bit);
}
/*
 * Kick the monitor trigger bit for @monitor_id so the hypervisor
 * examines the interrupt page, without needing a hypercall.
 */
static inline void
vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
{
	unsigned int group = monitor_id / HV_MON_TRIG_LEN;
	uint32_t bit = 1u << (monitor_id % HV_MON_TRIG_LEN);

	vmbus_sync_set_bit(&dev->monitor_page->trigs[group].pending, bit);
}
/*
 * Notify the host that @chan has pending data: first set the
 * channel's relid bit in the interrupt page, then kick the monitor
 * trigger so the hypervisor examines that page.  Order matters:
 * the interrupt bit must be visible before the trigger fires.
 */
static void
vmbus_set_event(const struct rte_vmbus_device *dev,
		const struct vmbus_channel *chan)
{
	vmbus_send_interrupt(dev, chan->relid);
	vmbus_set_monitor(dev, chan->monitor_id);
}
/*
 * Set the wait between when hypervisor examines the trigger.
 */
void
rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
		      const struct vmbus_channel *chan,
		      uint32_t latency)
{
	uint32_t idx = chan->monitor_id / VMBUS_MONTRIG_LEN;
	uint32_t offs = chan->monitor_id % VMBUS_MONTRIG_LEN;

	/*
	 * The stored value is latency / 100; reject anything that
	 * would overflow a 16-bit host field (presumably the lat[]
	 * entries are uint16_t, matching this bound).
	 */
	if (latency >= UINT16_MAX * 100) {
		VMBUS_LOG(ERR, "invalid latency value %u", latency);
		return;
	}

	if (idx >= VMBUS_MONTRIGS_MAX) {
		VMBUS_LOG(ERR, "invalid monitor trigger %u",
			  idx);
		return;
	}

	/* Host value is expressed in 100 nanosecond units */
	dev->monitor_page->lat[idx][offs] = latency / 100;
}
/*
 * Notify host that there are data pending on our TX bufring.
 *
 * Since this in userspace, rely on the monitor page.
 * Can't do a hypercall from userspace.
 */
void
rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
{
	const struct vmbus_br *ring = &chan->txbr;

	/* Ring updates must be globally visible before signaling host */
	rte_smp_wmb();

	/* Host has masked interrupts on this ring: nothing to do */
	if (ring->vbr->imask)
		return;

	vmbus_set_event(chan->device, chan);
}
/* Do a simple send directly using transmit ring. */
int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
void *data, uint32_t dlen,
uint64_t xactid, uint32_t flags, bool *need_sig)
{
struct vmbus_chanpkt pkt;
unsigned int pktlen, pad_pktlen;
const uint32_t hlen = sizeof(pkt);
bool send_evt = false;
uint64_t pad = 0;
struct iovec iov[3];
int error;
pktlen = hlen + dlen;
pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
pkt.hdr.type = type;
pkt.hdr.flags = flags;
pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
pkt.hdr.xactid = xactid;
iov[0].iov_base = &pkt;
iov[0].iov_len = hlen;
iov[1].iov_base = data;
iov[1].iov_len = dlen;
iov[2].iov_base = &pad;
iov[2].iov_len = pad_pktlen - pktlen;
error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);
/*
* caller sets need_sig to non-NULL if it will handle
* signaling if required later.
* if need_sig is NULL, signal now if needed.
*/
if (need_sig)
*need_sig |= send_evt;
else if (error == 0 && send_evt)
rte_vmbus_chan_signal_tx(chan);
return error;
}
/* Do a scatter/gather send where the descriptor points to data. */
int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
			       struct vmbus_gpa sg[], uint32_t sglen,
			       void *data, uint32_t dlen,
			       uint64_t xactid, bool *need_sig)
{
	struct vmbus_chanpkt_sglist pkt;
	uint32_t hlen, pktlen, whole;
	bool send_evt = false;
	uint64_t pad = 0;
	struct iovec iov[4];
	int error;

	/* Header length includes the trailing GPA descriptor array */
	hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
	pktlen = hlen + dlen;
	/* total length on the ring is padded to a quadword boundary */
	whole = RTE_ALIGN(pktlen, sizeof(uint64_t));

	pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
	pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
	pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.tlen = whole >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.xactid = xactid;
	pkt.rsvd = 0;
	pkt.gpa_cnt = sglen;

	iov[0] = (struct iovec){ .iov_base = &pkt, .iov_len = sizeof(pkt) };
	iov[1] = (struct iovec){ .iov_base = sg,
				 .iov_len = sizeof(struct vmbus_gpa) * sglen };
	iov[2] = (struct iovec){ .iov_base = data, .iov_len = dlen };
	iov[3] = (struct iovec){ .iov_base = &pad, .iov_len = whole - pktlen };

	error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);

	/* if caller is batching, just propagate the status */
	if (need_sig)
		*need_sig |= send_evt;
	else if (error == 0 && send_evt)
		rte_vmbus_chan_signal_tx(chan);

	return error;
}
bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel)
{
const struct vmbus_br *br = &channel->rxbr;
rte_smp_rmb();
return br->vbr->rindex == br->vbr->windex;
}
/*
 * Signal host after reading N bytes from the RX ring.
 *
 * Only relevant when the host advertises pending-send-size support:
 * a blocked host writer records how much room it needs in
 * pending_send, and the guest must kick it once that much space
 * has just been freed by this read.
 */
void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
{
	struct vmbus_br *rbr = &chan->rxbr;
	uint32_t write_sz, pending_sz;

	/* No need for signaling on older versions */
	if (!rbr->vbr->feature_bits.feat_pending_send_sz)
		return;

	/* Make sure reading of pending happens after new read index */
	rte_mb();

	pending_sz = rbr->vbr->pending_send;
	/* Host is not waiting for room */
	if (!pending_sz)
		return;

	rte_smp_rmb();
	write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);

	/* If there was space before then host was not blocked */
	if (write_sz - bytes_read > pending_sz)
		return;

	/* If pending write will not fit */
	if (write_sz <= pending_sz)
		return;

	vmbus_set_event(chan->device, chan);
}
/*
 * Receive one packet: strip the channel packet header and copy the
 * payload into @data.  On entry *len is the caller's buffer size;
 * on return it holds the payload length (set even on -ENOBUFS so
 * the caller can retry with a large enough buffer).
 */
int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
			uint64_t *request_id)
{
	struct vmbus_chanpkt_hdr pkt;
	uint32_t avail = *len;
	uint32_t hlen, dlen;
	int error;

	*len = 0;

	error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
	if (error)
		return error;

	if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
		/* XXX this channel is dead actually. */
		return -EIO;
	}

	if (unlikely(pkt.hlen > pkt.tlen)) {
		VMBUS_LOG(ERR, "VMBUS recv,invalid hlen %u and tlen %u",
			  pkt.hlen, pkt.tlen);
		return -EIO;
	}

	/* Length are in quad words */
	hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT;
	dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen;
	*len = dlen;

	/* If caller buffer is not large enough */
	if (unlikely(dlen > avail))
		return -ENOBUFS;

	if (request_id)
		*request_id = pkt.xactid;

	/* Read data and skip packet header */
	error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen);
	if (error)
		return error;

	/*
	 * Account header + data + the trailing uint64_t the ring
	 * carries per packet when telling the host how much was read.
	 */
	rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t));
	return 0;
}
/* TODO: replace this with inplace ring buffer (no copy) */
int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
			    void *data, uint32_t *len)
{
	struct vmbus_chanpkt_hdr pkt;
	uint32_t avail = *len;
	uint32_t dlen;
	int error;

	error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
	if (error)
		return error;

	if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
		VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
		/* XXX this channel is dead actually. */
		return -EIO;
	}

	if (unlikely(pkt.hlen > pkt.tlen)) {
		VMBUS_LOG(ERR, "VMBUS recv,invalid hlen %u and tlen %u",
			  pkt.hlen, pkt.tlen);
		return -EIO;
	}

	/* Length are in quad words */
	dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT;
	*len = dlen;

	/* If caller buffer is not large enough */
	if (unlikely(dlen > avail))
		return -ENOBUFS;

	/* Read data and skip packet header */
	error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0);
	if (error)
		return error;

	/* Return the number of bytes read */
	return dlen + sizeof(uint64_t);
}
/*
 * Allocate and initialize a channel for the given relid/subid and
 * map its ring buffers.
 *
 * Returns 0 and stores the channel in *new_chan on success;
 * returns a negative errno and leaves *new_chan untouched on
 * failure.  (Previously *new_chan was assigned before
 * vmbus_uio_map_rings(), so a mapping failure freed the channel
 * but left *new_chan pointing at freed memory.)
 */
int vmbus_chan_create(const struct rte_vmbus_device *device,
		      uint16_t relid, uint16_t subid, uint8_t monitor_id,
		      struct vmbus_channel **new_chan)
{
	struct vmbus_channel *chan;
	int err;

	chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
				  device->device.numa_node);
	if (!chan)
		return -ENOMEM;

	STAILQ_INIT(&chan->subchannel_list);
	chan->device = device;
	chan->subchannel_id = subid;
	chan->relid = relid;
	chan->monitor_id = monitor_id;

	err = vmbus_uio_map_rings(chan);
	if (err) {
		rte_free(chan);
		return err;
	}

	/* Publish the channel only once it is fully set up */
	*new_chan = chan;
	return 0;
}
/* Setup the primary channel */
int rte_vmbus_chan_open(struct rte_vmbus_device *device,
struct vmbus_channel **new_chan)
{
struct mapped_vmbus_resource *uio_res;
int err;
uio_res = vmbus_uio_find_resource(device);
if (!uio_res) {
VMBUS_LOG(ERR, "can't find uio resource");
return -EINVAL;
}
err = vmbus_chan_create(device, device->relid, 0,
device->monitor_id, new_chan);
if (!err) {
device->primary = *new_chan;
uio_res->primary = *new_chan;
}
return err;
}
/*
 * Number of channels usable on this device: more than one only
 * when the uio driver supports subchannels.
 */
int rte_vmbus_max_channels(const struct rte_vmbus_device *device)
{
	return vmbus_uio_subchannels_supported(device, device->primary)
		? VMBUS_MAX_CHANNELS : 1;
}
/* Setup secondary channel */
int rte_vmbus_subchan_open(struct vmbus_channel *primary,
			   struct vmbus_channel **new_chan)
{
	struct vmbus_channel *subchan;
	int err;

	err = vmbus_uio_get_subchan(primary, &subchan);
	if (err != 0)
		return err;

	/* Track subchannels on the primary's list for later cleanup */
	STAILQ_INSERT_TAIL(&primary->subchannel_list, subchan, next);
	*new_chan = subchan;
	return 0;
}
/* Return the sub-channel index (0 for the primary channel). */
uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan)
{
	return chan->subchannel_id;
}
/* Release a channel obtained from chan_open/subchan_open. */
void rte_vmbus_chan_close(struct vmbus_channel *chan)
{
	struct vmbus_channel *primary = chan->device->primary;

	/*
	 * intentionally leak primary channel because
	 * secondary may still reference it
	 */
	if (chan == primary)
		return;

	STAILQ_REMOVE(&primary->subchannel_list, chan,
		      vmbus_channel, next);
	rte_free(chan);
}
/* Dump one ring buffer's indices, flags, and next packet header to @f. */
static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
{
	const struct vmbus_bufring *vbr = br->vbr;
	struct vmbus_chanpkt_hdr pkt;

	fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n",
		id, vbr->windex, vbr->rindex, vbr->imask,
		vbr->pending_send, vbr->feature_bits.value);
	fprintf(f, " size=%u avail write=%u read=%u\n",
		br->dsize, vmbus_br_availwrite(br, vbr->windex),
		vmbus_br_availread(br));

	/* Peek (without consuming) the next packet header, if any */
	if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0)
		fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n",
			pkt.type,
			pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT,
			pkt.flags, pkt.xactid);
}
/* Dump a channel's identity and both of its ring buffers to @f. */
void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan)
{
	fprintf(f, "channel[%u] relid=%u monitor=%u\n",
		chan->subchannel_id, chan->relid, chan->monitor_id);
	vmbus_dump_ring(f, "rxbr", &chan->rxbr);
	vmbus_dump_ring(f, "txbr", &chan->txbr);
}
|