/*
 * Ring buffer management
 *
 * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/buf.h>
#include <haproxy/cli.h>
#include <haproxy/ring.h>
#include <haproxy/sc_strm.h>
#include <haproxy/stconn.h>
#include <haproxy/thread.h>

/* context used to dump the contents of a ring via "show events" or "show errors" */
struct show_ring_ctx {
        struct ring *ring;  /* ring to be dumped */
        size_t ofs;         /* offset to restart from, ~0 = end */
        uint flags;         /* set of RING_WF_* */
};

/* Initializes a pre-allocated ring <ring> with the buffer area <area> of
 * size <size>.
 */
void ring_init(struct ring *ring, void *area, size_t size)
{
        HA_RWLOCK_INIT(&ring->lock);
        LIST_INIT(&ring->waiters);
        ring->readers_count = 0;
        ring->ofs = 0;
        ring->buf = b_make(area, size, 0, 0);
        /* write the initial RC byte */
        b_putchr(&ring->buf, 0);
}
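
/* Note on the storage format set up by ring_init() above and maintained by
 * ring_write() below: the buffer always ends with a one-byte reader counter
 * (so an empty ring contains a single null byte), and each message is stored
 * as:
 *
 *      <varint-encoded length> <payload> <one-byte reader counter>
 *
 * A reader atomically increments the counter byte preceding the next message
 * it will dump, which keeps the writer from deleting that message to make
 * room for new ones.
 */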

/* Creates and returns a ring buffer of size <size> bytes. Returns NULL on
 * allocation failure.
 */
struct ring *ring_new(size_t size)
{
        struct ring *ring = NULL;
        void *area = NULL;

        if (size < 2)
                goto fail;

        ring = malloc(sizeof(*ring));
        if (!ring)
                goto fail;

        area = malloc(size);
        if (!area)
                goto fail;

        ring_init(ring, area, size);
        return ring;
 fail:
        free(area);
        free(ring);
        return NULL;
}
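
/* Illustrative usage sketch (not part of the build): allocating a ring,
 * feeding it one message and releasing it. <my_size> is an arbitrary example
 * size:
 *
 *      struct ring *r = ring_new(my_size);
 *
 *      if (r) {
 *              struct ist msg = ist("hello");
 *
 *              ring_write(r, ~0, NULL, 0, &msg, 1);
 *              ring_free(r);
 *      }
 */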

/* Creates a unified ring + storage area at address <area> for <size> bytes.
 * If <area> is null, then it's allocated of the requested size. The ring
 * struct is part of the area so the usable area is slightly reduced. However
 * the ring storage is immediately adjacent to the struct. ring_free() will
 * ignore such rings, so the caller is responsible for releasing them.
 */
struct ring *ring_make_from_area(void *area, size_t size)
{
        struct ring *ring = NULL;

        if (size < sizeof(*ring))
                return NULL;

        if (!area)
                area = malloc(size);
        if (!area)
                return NULL;

        ring = area;
        area += sizeof(*ring);
        ring_init(ring, area, size - sizeof(*ring));
        return ring;
}
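
/* Illustrative sketch: carving a ring out of a caller-provided area, e.g. a
 * static or mapped region (<shared_area> and <shared_size> are hypothetical
 * names). Since the struct lives inside the area, ring_free() will not
 * release it; the area's owner keeps that responsibility:
 *
 *      struct ring *r = ring_make_from_area(shared_area, shared_size);
 *
 *      if (!r)
 *              return -1;
 */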

/* Resizes existing ring <ring> to <size>, without losing its contents. The
 * new size must be strictly larger than the current one or no change will be
 * performed. The pointer to the ring is returned on success, or NULL on
 * allocation failure. This will lock the ring for writes.
 */
struct ring *ring_resize(struct ring *ring, size_t size)
{
        void *area;

        if (b_size(&ring->buf) >= size)
                return ring;

        area = malloc(size);
        if (!area)
                return NULL;

        HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);

        /* recheck the buffer's size, it may have changed during the malloc */
        if (b_size(&ring->buf) < size) {
                /* copy old contents */
                b_getblk(&ring->buf, area, ring->buf.data, 0);
                area = HA_ATOMIC_XCHG(&ring->buf.area, area);
                ring->buf.size = size;
        }

        HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);

        free(area);
        return ring;
}
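
/* Illustrative sketch: growing an existing ring (e.g. when the configured
 * size increases); on allocation failure the original ring and its contents
 * are left untouched. <my_ring> and <new_size> are hypothetical:
 *
 *      if (!ring_resize(my_ring, new_size))
 *              ha_alert("failed to resize ring\n");
 */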

/* destroys and frees ring <ring> */
void ring_free(struct ring *ring)
{
        if (!ring)
                return;

        /* make sure it was not allocated by ring_make_from_area */
        if (ring->buf.area == (void *)ring + sizeof(*ring))
                return;
        free(ring->buf.area);
        free(ring);
}

/* Tries to send <npfx> parts from <prefix> followed by <nmsg> parts from <msg>
 * to ring <ring>. The message is sent atomically. It may be truncated to
 * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
 * two lists, it's just a convenience to help the caller prepend some prefixes
 * when necessary. It takes the ring's write lock to make sure no other thread
 * will touch the buffer during the update. Returns the number of bytes sent,
 * or <=0 on failure.
 */
ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], size_t npfx,
                   const struct ist msg[], size_t nmsg)
{
        struct buffer *buf = &ring->buf;
        struct appctx *appctx;
        size_t totlen = 0;
        size_t lenlen;
        uint64_t dellen;
        int dellenlen;
        ssize_t sent = 0;
        int i;

        /* we have to find some room to add our message (the buffer is
         * never empty and at least contains the previous counter) and
         * to update both the buffer contents and heads at the same
         * time (it's doable using atomic ops but not worth the
         * trouble, let's just lock). For this we first need to know
         * the total message's length. We cannot measure it while
         * copying due to the varint encoding of the length.
         */
        for (i = 0; i < npfx; i++)
                totlen += pfx[i].len;
        for (i = 0; i < nmsg; i++)
                totlen += msg[i].len;

        if (totlen > maxlen)
                totlen = maxlen;

        lenlen = varint_bytes(totlen);

        HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
        if (lenlen + totlen + 1 + 1 > b_size(buf))
                goto done_buf;

        while (b_room(buf) < lenlen + totlen + 1) {
                /* we need to delete the oldest message (from the end),
                 * and we have to stop if there's a reader stuck there.
                 * Unless there's corruption in the buffer it's guaranteed
                 * that we have enough data to find 1 counter byte, a
                 * varint-encoded length (1 byte min) and the message
                 * payload (0 bytes min).
                 */
                if (*b_head(buf))
                        goto done_buf;
                dellenlen = b_peek_varint(buf, 1, &dellen);
                if (!dellenlen)
                        goto done_buf;
                BUG_ON(b_data(buf) < 1 + dellenlen + dellen);

                b_del(buf, 1 + dellenlen + dellen);
                ring->ofs += 1 + dellenlen + dellen;
        }

        /* OK now we do have room */
        __b_put_varint(buf, totlen);

        totlen = 0;
        for (i = 0; i < npfx; i++) {
                size_t len = pfx[i].len;

                if (len + totlen > maxlen)
                        len = maxlen - totlen;
                if (len)
                        __b_putblk(buf, pfx[i].ptr, len);
                totlen += len;
        }

        for (i = 0; i < nmsg; i++) {
                size_t len = msg[i].len;

                if (len + totlen > maxlen)
                        len = maxlen - totlen;
                if (len)
                        __b_putblk(buf, msg[i].ptr, len);
                totlen += len;
        }

        *b_tail(buf) = 0; buf->data++; // new read counter
        sent = lenlen + totlen + 1;

        /* notify potential readers */
        list_for_each_entry(appctx, &ring->waiters, wait_entry)
                appctx_wakeup(appctx);

 done_buf:
        HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
        return sent;
}
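
/* Illustrative sketch of a write with a prefix part: the prefix and message
 * lists are concatenated into a single atomic record. <my_ring> is
 * hypothetical and ~0 means "no length limit":
 *
 *      struct ist pfx = ist("info: ");
 *      struct ist msg = ist("something happened");
 *
 *      if (ring_write(my_ring, ~0, &pfx, 1, &msg, 1) <= 0)
 *              ; // ring too small, or full of messages still being read
 */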

/* Tries to attach a new reader (e.g. an appctx) on ring <ring>. This is
 * meant to be used by low level appctx code such as CLI or ring forwarding.
 * For higher level functions, please see the relevant parts in appctx or CLI.
 * It returns non-zero on success or zero on failure if too many users are
 * already attached. On success, the caller MUST call ring_detach_appctx()
 * to detach itself, even if it was never woken up.
 */
int ring_attach(struct ring *ring)
{
        int users = ring->readers_count;

        do {
                if (users >= 255)
                        return 0;
        } while (!_HA_ATOMIC_CAS(&ring->readers_count, &users, users + 1));
        return 1;
}

/* detach an appctx from a ring. The appctx is expected to be waiting at
 * offset <ofs>. Nothing is done if <ring> is NULL.
 */
void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
{
        if (!ring)
                return;

        HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
        if (ofs != ~0) {
                /* reader was still attached */
                ofs -= ring->ofs;
                BUG_ON(ofs >= b_size(&ring->buf));
                LIST_DEL_INIT(&appctx->wait_entry);
                HA_ATOMIC_DEC(b_peek(&ring->buf, ofs));
        }
        HA_ATOMIC_DEC(&ring->readers_count);
        HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
}
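
/* Illustrative pairing of the two functions above from a reader's point of
 * view (sketch; <ctx> stands for whatever per-appctx storage keeps the last
 * offset, ~0 meaning "not attached to a position yet"):
 *
 *      if (!ring_attach(ring))
 *              return 0;       // too many readers already
 *      ...
 *      // in the reader's release path:
 *      ring_detach_appctx(ring, appctx, ctx->ofs);
 */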

/* Tries to attach CLI handler <appctx> as a new reader on ring <ring>. This is
 * meant to be used when registering a CLI function to dump a buffer, so it
 * returns zero on success, or non-zero on failure with a message in the appctx
 * CLI context. It automatically sets the io_handler and io_release callbacks if
 * they were not set. The <flags> take a combination of RING_WF_*.
 */
int ring_attach_cli(struct ring *ring, struct appctx *appctx, uint flags)
{
        struct show_ring_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

        if (!ring_attach(ring))
                return cli_err(appctx,
                               "Sorry, too many watchers (255) on this ring buffer. "
                               "What could it have so interesting to attract so many watchers ?");

        if (!appctx->io_handler)
                appctx->io_handler = cli_io_handler_show_ring;
        if (!appctx->io_release)
                appctx->io_release = cli_io_release_show_ring;

        memset(ctx, 0, sizeof(*ctx));
        ctx->ring = ring;
        ctx->ofs = ~0; // start from the oldest event
        ctx->flags = flags;
        return 0;
}
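
/* Illustrative sketch of a CLI parser built on top of ring_attach_cli(),
 * assuming the usual CLI keyword parse prototype; <my_ring> and the function
 * name are hypothetical (the real users are the "show events"/"show errors"
 * handlers):
 *
 *      static int cli_parse_show_my_ring(char **args, char *payload,
 *                                        struct appctx *appctx, void *private)
 *      {
 *              return ring_attach_cli(my_ring, appctx, 0);
 *      }
 */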

/* This function dumps all events from the ring pointed to by the context's
 * <ring> into the appctx's output buffer, and takes the context's <ofs> as
 * the seek offset into the buffer's history (~0 for the oldest known event).
 * It looks at the context's <flags> for options: RING_WF_WAIT_MODE means it
 * must wait for new data or for any key to be pressed, and RING_WF_SEEK_NEW
 * means it must seek directly to the end to wait for new contents. It returns
 * 0 if the output buffer is full or if it must wait for new events, meaning
 * it needs to be called again, otherwise non-zero. It is meant to be used
 * with cli_io_release_show_ring() to clean up.
 */
int cli_io_handler_show_ring(struct appctx *appctx)
{
        struct show_ring_ctx *ctx = appctx->svcctx;
        struct stconn *sc = appctx_sc(appctx);
        struct ring *ring = ctx->ring;
        struct buffer *buf = &ring->buf;
        size_t ofs = ctx->ofs;
        size_t last_ofs;
        uint64_t msg_len;
        size_t len, cnt;
        int ret;

        if (unlikely(sc_ic(sc)->flags & (CF_WRITE_ERROR|CF_SHUTW)))
                return 1;

        HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
        LIST_DEL_INIT(&appctx->wait_entry);
        HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);

        HA_RWLOCK_RDLOCK(LOGSRV_LOCK, &ring->lock);

        /* explanation for the initialization below: it would be better to do
         * this in the parsing function but this would occasionally result in
         * dropped events because we'd take a reference on the oldest message
         * and keep it while being scheduled. Thus instead let's take it the
         * first time we enter here so that we have a chance to pass many
         * existing messages before grabbing a reference to a location. This
         * value cannot be produced after initialization.
         */
        if (unlikely(ofs == ~0)) {
                ofs = 0;

                /* going to the end means looking at tail-1 */
                if (ctx->flags & RING_WF_SEEK_NEW)
                        ofs += b_data(buf) - 1;

                HA_ATOMIC_INC(b_peek(buf, ofs));
                ofs += ring->ofs;
        }

        /* we were already there, adjust the offset to be relative to
         * the buffer's head and remove us from the counter.
         */
        ofs -= ring->ofs;
        BUG_ON(ofs >= buf->size);
        HA_ATOMIC_DEC(b_peek(buf, ofs));

        /* in this loop, ofs always points to the counter byte that precedes
         * the message so that we can take our reference there if we have to
         * stop before the end (ret=0).
         */
        ret = 1;
        while (ofs + 1 < b_data(buf)) {
                cnt = 1;
                len = b_peek_varint(buf, ofs + cnt, &msg_len);
                if (!len)
                        break;
                cnt += len;
                BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));

                if (unlikely(msg_len + 1 > b_size(&trash))) {
                        /* too large a message to ever fit, let's skip it */
                        ofs += cnt + msg_len;
                        continue;
                }

                chunk_reset(&trash);
                len = b_getblk(buf, trash.area, msg_len, ofs + cnt);
                trash.data += len;
                trash.area[trash.data++] = '\n';

                if (applet_putchk(appctx, &trash) == -1) {
                        ret = 0;
                        break;
                }
                ofs += cnt + msg_len;
        }

        HA_ATOMIC_INC(b_peek(buf, ofs));
        ofs += ring->ofs;
        last_ofs = ring->ofs;
        ctx->ofs = ofs;
        HA_RWLOCK_RDUNLOCK(LOGSRV_LOCK, &ring->lock);

        if (ret && (ctx->flags & RING_WF_WAIT_MODE)) {
                /* we've drained everything and are configured to wait for more
                 * data or an event (keypress, close)
                 */
                if (!sc_oc(sc)->output && !(sc_oc(sc)->flags & CF_SHUTW)) {
                        /* let's be woken up once new data arrive */
                        HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
                        LIST_APPEND(&ring->waiters, &appctx->wait_entry);
                        ofs = ring->ofs;
                        HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
                        if (ofs != last_ofs) {
                                /* more data was added into the ring between the
                                 * unlock and the lock, and the writer might not
                                 * have seen us. We need to reschedule a read.
                                 */
                                applet_have_more_data(appctx);
                        } else
                                applet_have_no_more_data(appctx);
                        ret = 0;
                }
                /* always drain all the request */
                co_skip(sc_oc(sc), sc_oc(sc)->output);
        }
        return ret;
}
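
/* Note on the handler above: between two invocations the reader "parks" on
 * the counter byte preceding the next message to dump (HA_ATOMIC_INC), which
 * keeps ring_write() from recycling that message, and the position is stored
 * in ctx->ofs as an absolute offset (relative offset + ring->ofs) so that it
 * remains valid even after the writer rotates the buffer.
 */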

/* must be called after cli_io_handler_show_ring() above */
void cli_io_release_show_ring(struct appctx *appctx)
{
        struct show_ring_ctx *ctx = appctx->svcctx;
        struct ring *ring = ctx->ring;
        size_t ofs = ctx->ofs;

        ring_detach_appctx(ring, appctx, ofs);
}

/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* End:
*/