/*
 * Fast Weighted Round Robin load balancing algorithm.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <import/eb32tree.h>
#include <haproxy/api.h>
#include <haproxy/backend.h>
#include <haproxy/queue.h>
#include <haproxy/server-t.h>


static inline void fwrr_remove_from_tree(struct server *s);
static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s);
static inline void fwrr_dequeue_srv(struct server *s);
static void fwrr_get_srv(struct server *s);
static void fwrr_queue_srv(struct server *s);
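
/* Overview of the scheme implemented below: each fwrr_group keeps three
 * server trees. The "curr" tree holds the usable servers sorted by their
 * next position (npos) within the current window of curr_weight positions.
 * The "init" tree holds servers, sorted by inverted weight, which have not
 * yet been placed in the window (at start-up or after a tree switch), and
 * the "next" tree holds servers whose next position falls beyond the
 * current window. Once "curr" and "init" are exhausted, "init" and "next"
 * are swapped and a new window begins.
 */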


/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to down.
 * It does not matter whether the server was already down, nor whether the
 * new state is completely down (the caller may not know all the variables
 * of a server's state).
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_set_server_status_down(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	if (srv_willbe_usable(srv))
		goto out_update_state;

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);

	if (!srv_currently_usable(srv))
		/* server was already down */
		goto out_update_backend;

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight -= srv->cur_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck--;

		if (srv == p->lbprm.fbck) {
			/* we lost the first backup server in a single-backup
			 * configuration; we must search for another one.
			 */
			struct server *srv2 = p->lbprm.fbck;
			do {
				srv2 = srv2->next;
			} while (srv2 &&
				 !((srv2->flags & SRV_F_BACKUP) &&
				   srv_willbe_usable(srv2)));
			p->lbprm.fbck = srv2;
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act--;
	}

	fwrr_dequeue_srv(srv);
	fwrr_remove_from_tree(srv);

 out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);

 out_update_state:
	srv_lb_commit_status(srv);
}

/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to up.
 * It does not matter whether the server was previously down, nor whether
 * the new state is completely UP (the caller may not know all the variables
 * of a server's state). This function will not change the weight of a
 * server which was already up.
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_set_server_status_up(struct server *srv)
{
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	if (!srv_willbe_usable(srv))
		goto out_update_state;

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);

	if (srv_currently_usable(srv))
		/* server was already up */
		goto out_update_backend;

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight += srv->next_eweight;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
		p->srv_bck++;

		if (!(p->options & PR_O_USE_ALL_BK)) {
			if (!p->lbprm.fbck) {
				/* there was no backup server anymore */
				p->lbprm.fbck = srv;
			} else {
				/* we may have restored a backup server prior to fbck,
				 * in which case it should replace it.
				 */
				struct server *srv2 = srv;
				do {
					srv2 = srv2->next;
				} while (srv2 && (srv2 != p->lbprm.fbck));
				if (srv2)
					p->lbprm.fbck = srv;
			}
		}
	} else {
		p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
		p->srv_act++;
	}

	/* note that eweight cannot be 0 here */
	fwrr_get_srv(srv);
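	/* place its first position one stride past curr_pos, where a stride
	 * is the remaining room in the current and next windows divided by
	 * the server's effective weight.
	 */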
	srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
	fwrr_queue_srv(srv);

 out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);

 out_update_state:
	srv_lb_commit_status(srv);
}

/* This function must be called after an update to server <srv>'s effective
 * weight. It may be called after a state change too.
 *
 * The server's lock must be held. The lbprm's lock will be used.
 */
static void fwrr_update_server_weight(struct server *srv)
{
	int old_state, new_state;
	struct proxy *p = srv->proxy;
	struct fwrr_group *grp;

	if (!srv_lb_status_changed(srv))
		return;

	/* If changing the server's weight changes its state, we simply apply
	 * the procedures we already have for status change. If the state
	 * remains down, the server is not in any tree, so it's as easy as
	 * updating its values. If the state remains up with different weights,
	 * there are some computations to perform to find a new place and
	 * possibly a new tree for this server.
	 */

	old_state = srv_currently_usable(srv);
	new_state = srv_willbe_usable(srv);

	if (!old_state && !new_state) {
		srv_lb_commit_status(srv);
		return;
	}
	else if (!old_state && new_state) {
		fwrr_set_server_status_up(srv);
		return;
	}
	else if (old_state && !new_state) {
		fwrr_set_server_status_down(srv);
		return;
	}

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);

	grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
	grp->next_weight = grp->next_weight - srv->cur_eweight + srv->next_eweight;

	p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
	p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;

	if (srv->lb_tree == grp->init) {
		fwrr_dequeue_srv(srv);
		fwrr_queue_by_weight(grp->init, srv);
	}
	else if (!srv->lb_tree) {
		/* FIXME: server was down. This is not possible right now but
		 * may be needed soon for slowstart or graceful shutdown.
		 */
		fwrr_dequeue_srv(srv);
		fwrr_get_srv(srv);
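		/* same initial placement as in fwrr_set_server_status_up() */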
		srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
		fwrr_queue_srv(srv);
	} else {
		/* The server is either active or in the next queue. If it's
		 * still in the active queue and it has not consumed all of its
		 * places, let's adjust its next position.
		 */
		fwrr_get_srv(srv);

		if (srv->next_eweight > 0) {
			int prev_next = srv->npos;
			int step = grp->next_weight / srv->next_eweight;

			srv->npos = srv->lpos + step;
			srv->rweight = 0;

			if (srv->npos > prev_next)
				srv->npos = prev_next;
			if (srv->npos < grp->curr_pos + 2)
				srv->npos = grp->curr_pos + step;
		} else {
			/* push it into the next tree */
			srv->npos = grp->curr_pos + grp->curr_weight;
		}

		fwrr_dequeue_srv(srv);
		fwrr_queue_srv(srv);
	}

	update_backend_weight(p);
	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);

	srv_lb_commit_status(srv);
}

/* Remove a server from a tree. It must have previously been dequeued. This
 * function is meant to be called when a server is going down or has its
 * weight disabled.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_remove_from_tree(struct server *s)
{
	s->lb_tree = NULL;
}

/* Queue a server in the weight tree <root>, assuming the weight is >0.
 * We want to sort them by inverted weights, because we need to place
 * heavy servers first in order to get a smooth distribution.
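 * For example, eweights 32 and 16 give keys SRV_EWGHT_MAX-32 and
 * SRV_EWGHT_MAX-16, so the heavier server is dequeued first.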
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s)
{
	s->lb_node.key = SRV_EWGHT_MAX - s->next_eweight;
	eb32_insert(root, &s->lb_node);
	s->lb_tree = root;
}

/* This function is responsible for building the weight trees in case of fast
 * weighted round-robin. It also sets p->lbprm.wdiv to the eweight to uweight
 * ratio. Both active and backup groups are initialized.
 */
void fwrr_init_server_groups(struct proxy *p)
{
	struct server *srv;
	struct eb_root init_head = EB_ROOT;

	p->lbprm.set_server_status_up   = fwrr_set_server_status_up;
	p->lbprm.set_server_status_down = fwrr_set_server_status_down;
	p->lbprm.update_server_eweight  = fwrr_update_server_weight;

	p->lbprm.wdiv = BE_WEIGHT_SCALE;
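	/* scale each server's user weight into an effective weight, rounding
	 * the division up so that a non-null uweight yields a non-null eweight.
	 */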
	for (srv = p->srv; srv; srv = srv->next) {
		srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
		srv_lb_commit_status(srv);
	}

	recount_servers(p);
	update_backend_weight(p);

	/* prepare the active servers group */
	p->lbprm.fwrr.act.curr_pos = p->lbprm.fwrr.act.curr_weight =
		p->lbprm.fwrr.act.next_weight = p->lbprm.tot_wact;
	p->lbprm.fwrr.act.curr = p->lbprm.fwrr.act.t0 =
		p->lbprm.fwrr.act.t1 = init_head;
	p->lbprm.fwrr.act.init = &p->lbprm.fwrr.act.t0;
	p->lbprm.fwrr.act.next = &p->lbprm.fwrr.act.t1;

	/* prepare the backup servers group */
	p->lbprm.fwrr.bck.curr_pos = p->lbprm.fwrr.bck.curr_weight =
		p->lbprm.fwrr.bck.next_weight = p->lbprm.tot_wbck;
	p->lbprm.fwrr.bck.curr = p->lbprm.fwrr.bck.t0 =
		p->lbprm.fwrr.bck.t1 = init_head;
	p->lbprm.fwrr.bck.init = &p->lbprm.fwrr.bck.t0;
	p->lbprm.fwrr.bck.next = &p->lbprm.fwrr.bck.t1;

	/* queue active and backup servers in two distinct groups */
	for (srv = p->srv; srv; srv = srv->next) {
		if (!srv_currently_usable(srv))
			continue;
		fwrr_queue_by_weight((srv->flags & SRV_F_BACKUP) ?
				p->lbprm.fwrr.bck.init :
				p->lbprm.fwrr.act.init,
				srv);
	}
}

/* simply removes a server from a weight tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_dequeue_srv(struct server *s)
{
	eb32_delete(&s->lb_node);
}

/* queues a server into the appropriate group and tree depending on its
 * backup status and ->npos. If the server is disabled, simply assign
 * it to the NULL tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static void fwrr_queue_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp;

	grp = (s->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;

	/* Delay everything which does not fit into the window and everything
	 * which does not fit into the theoretical new window.
	 */
	if (!srv_willbe_usable(s)) {
		fwrr_remove_from_tree(s);
	}
	else if (s->next_eweight <= 0 ||
		 s->npos >= 2 * grp->curr_weight ||
		 s->npos >= grp->curr_weight + grp->next_weight) {
		/* put into next tree, and readjust npos in case we could
		 * finally take this back to current. */
		s->npos -= grp->curr_weight;
		fwrr_queue_by_weight(grp->next, s);
	}
	else {
		/* The sorting key is stored in units of s->npos * user_weight
		 * in order to avoid overflows. As stated in backend.h, the
		 * lower the scale, the rougher the weights modulation, and the
		 * higher the scale, the lower the number of servers without
		 * overflow. With this formula, the result is always positive,
		 * so we can use eb32_insert().
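		 * At equal npos, a higher effective weight yields a smaller
		 * key, so heavier servers are picked first within a position.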
		 */
		s->lb_node.key = SRV_UWGHT_RANGE * s->npos +
			(unsigned)(SRV_EWGHT_MAX + s->rweight - s->next_eweight) / BE_WEIGHT_SCALE;

		eb32_insert(&grp->curr, &s->lb_node);
		s->lb_tree = &grp->curr;
	}
}

/* prepares a server when extracting it from the "init" tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_get_srv_init(struct server *s)
{
	s->npos = s->rweight = 0;
}

/* prepares a server when extracting it from the "next" tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_get_srv_next(struct server *s)
{
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&s->proxy->lbprm.fwrr.bck :
		&s->proxy->lbprm.fwrr.act;

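	/* shift the position into the current window; fwrr_queue_srv()
	 * subtracted curr_weight when it pushed the server to "next".
	 */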
	s->npos += grp->curr_weight;
}

/* prepares a server when it was marked down.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_get_srv_down(struct server *s)
{
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&s->proxy->lbprm.fwrr.bck :
		&s->proxy->lbprm.fwrr.act;

	s->npos = grp->curr_pos;
}

/* prepares a server when extracting it from its tree.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static void fwrr_get_srv(struct server *s)
{
	struct proxy *p = s->proxy;
	struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
		&p->lbprm.fwrr.bck :
		&p->lbprm.fwrr.act;

	if (s->lb_tree == grp->init) {
		fwrr_get_srv_init(s);
	}
	else if (s->lb_tree == grp->next) {
		fwrr_get_srv_next(s);
	}
	else if (s->lb_tree == NULL) {
		fwrr_get_srv_down(s);
	}
}

/* switches trees "init" and "next" for FWRR group <grp>. "init" should be empty
 * when this happens, and "next" filled with servers sorted by weights.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static inline void fwrr_switch_trees(struct fwrr_group *grp)
{
	struct eb_root *swap;
	swap = grp->init;
	grp->init = grp->next;
	grp->next = swap;
	grp->curr_weight = grp->next_weight;
	grp->curr_pos = grp->curr_weight;
}

/* return next server from the current tree in FWRR group <grp>, or a server
 * from the "init" tree if appropriate. If both trees are empty, return NULL.
 *
 * The lbprm's lock must be held. The server's lock is not used.
 */
static struct server *fwrr_get_server_from_group(struct fwrr_group *grp)
{
	struct eb32_node *node1;
	struct eb32_node *node2;
	struct server *s1 = NULL;
	struct server *s2 = NULL;

	node1 = eb32_first(&grp->curr);
	if (node1) {
		s1 = eb32_entry(node1, struct server, lb_node);
		if (s1->cur_eweight && s1->npos <= grp->curr_pos)
			return s1;
	}

	/* Either we have no server left, or we have a hole. We'll look in the
	 * init tree for a better proposal. At this point, if <s1> is non-null,
	 * it is guaranteed to remain available as the tree is locked.
	 */
	node2 = eb32_first(grp->init);
	if (node2) {
		s2 = eb32_entry(node2, struct server, lb_node);
		if (s2->cur_eweight) {
			fwrr_get_srv_init(s2);
			return s2;
		}
	}
	return s1;
}

/* Computes next position of server <s> in the group. Nothing is done if <s>
 * has a zero weight. 
 *
 * The lbprm's lock must be held to protect lpos/npos/rweight.
 */
static inline void fwrr_update_position(struct fwrr_group *grp, struct server *s)
{
	unsigned int eweight = *(volatile unsigned int *)&s->cur_eweight;

	if (!eweight)
		return;

	if (!s->npos) {
		/* first time ever for this server */
		s->npos     = grp->curr_pos;
	}

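	/* advance by one stride of next_weight/eweight positions, keeping the
	 * remainder in rweight. E.g. with next_weight=4 and eweight=3, the
	 * successive strides are 1, 1 and 2, i.e. 3 picks every 4 positions.
	 */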
	s->lpos     = s->npos;
	s->npos    += grp->next_weight / eweight;
	s->rweight += grp->next_weight % eweight;

	if (s->rweight >= eweight) {
		s->rweight -= eweight;
		s->npos++;
	}
}

/* Return next server from the current tree in backend <p>, or a server from
 * the init tree if appropriate. If both trees are empty, return NULL.
 * Saturated servers are skipped and requeued.
 *
 * The lbprm's lock will be used in R/W mode. The server's lock is not used.
 */
struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
{
	struct server *srv, *full, *avoided;
	struct fwrr_group *grp;
	int switched;

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		grp = &p->lbprm.fwrr.act;
	else if (p->lbprm.fbck) {
		srv = p->lbprm.fbck;
		goto out;
	}
	else if (p->srv_bck)
		grp = &p->lbprm.fwrr.bck;
	else {
		srv = NULL;
		goto out;
	}

	switched = 0;
	avoided = NULL;
	full = NULL; /* NULL-terminated list of saturated servers */
	while (1) {
		/* if we see an empty group, let's first try to collect weights
		 * which might have recently changed.
		 */
		if (!grp->curr_weight)
			grp->curr_pos = grp->curr_weight = grp->next_weight;

		/* get first server from the "current" tree. When the end of
		 * the tree is reached, we may have to switch, but only once.
		 */
		while (1) {
			srv = fwrr_get_server_from_group(grp);
			if (srv)
				break;
			if (switched) {
				if (avoided) {
					srv = avoided;
					goto take_this_one;
				}
				goto requeue_servers;
			}
			switched = 1;
			fwrr_switch_trees(grp);
		}

		/* OK, we have a server. However, it may be saturated, in which
		 * case we don't want to reconsider it for now. We'll update
		 * its position and dequeue it anyway, so that we can move it
		 * to a better place afterwards.
		 */
		fwrr_update_position(grp, srv);
		fwrr_dequeue_srv(srv);
		grp->curr_pos++;
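		/* the server is not saturated if it has no maxconn, or has no
		 * queued connections and is still below its dynamic maxconn.
		 */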
		if (!srv->maxconn || (!srv->queue.length && srv->served < srv_dynamic_maxconn(srv))) {
			/* make sure it is not the server we are trying to exclude... */
			if (srv != srvtoavoid || avoided)
				break;

			avoided = srv; /* ...but remember that it was selected yet avoided */
		}

		/* the server is saturated or avoided, let's chain it for later reinsertion.
		 */
		srv->next_full = full;
		full = srv;
	}

 take_this_one:
	/* OK, we got the best server, let's update it */
	fwrr_queue_srv(srv);

 requeue_servers:
	/* Requeue all extracted servers. If full==srv then the server was
	 * avoided (unsuccessfully) and chained; omit it now. The only way
	 * to get here is with <avoided>==NULL or <avoided>==<srv>.
	 */
	if (unlikely(full != NULL)) {
		if (switched) {
			/* the tree has switched, requeue all extracted servers
			 * into "init", because their place was lost, and only
			 * their weight matters.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_by_weight(grp->init, full);
				full = full->next_full;
			} while (full);
		} else {
			/* requeue all extracted servers just as if they were consumed
			 * so that they regain their expected place.
			 */
			do {
				if (likely(full != srv))
					fwrr_queue_srv(full);
				full = full->next_full;
			} while (full);
		}
	}
 out:
	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */