author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-13 12:18:05 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-13 12:18:05 +0000
commit    | b46aad6df449445a9fc4aa7b32bd40005438e3f7 (patch)
tree      | 751aa858ca01f35de800164516b298887382919d /reg-tests/http-messaging/http_abortonclose.vtc
parent    | Initial commit. (diff)
Adding upstream version 2.9.5.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'reg-tests/http-messaging/http_abortonclose.vtc')
-rw-r--r-- | reg-tests/http-messaging/http_abortonclose.vtc | 226
1 file changed, 226 insertions, 0 deletions
diff --git a/reg-tests/http-messaging/http_abortonclose.vtc b/reg-tests/http-messaging/http_abortonclose.vtc
new file mode 100644
index 0000000..ea57f3d
--- /dev/null
+++ b/reg-tests/http-messaging/http_abortonclose.vtc
@@ -0,0 +1,226 @@
+varnishtest "A test for the abortonclose option (H1 only)"
+feature ignore_unknown_macro
+
+# NOTE : This test may fail if too many vtest processes are running in parallel,
+#        because the port reserved for the closed s1 server may be reused by another vtest
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.2-dev0)'"
+#REGTEST_TYPE=slow
+
+# b0 : Wait until s1 is detected as DOWN, to be sure it is stopped
+# b1 : Don't send /c4 before /c3 has been received by the s2 server
+# b2 : Used to receive syslog messages in the right order
+# b3 : Finish c3 before s2
+
+barrier b0 cond 3 -cyclic
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+barrier b3 cond 2 -cyclic
+
+server s1 {
+} -start
+server s1 -break
+
+server s2 {
+    rxreq
+
+    # unlock c4
+    barrier b1 sync
+
+    # wait for the end of c3
+    barrier b3 sync
+
+    expect_close
+} -start
+
+syslog S1 -level info {
+    recv alert
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+    barrier b0 sync
+
+    recv
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c1 HTTP/1\\.1\""
+    barrier b2 sync
+    recv
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CC-- .* .* \"GET /c2 HTTP/1\\.1\""
+    barrier b2 sync
+    recv
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/<NOSRV> [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CQ-- .* .* \"GET /c4 HTTP/1\\.1\""
+    barrier b2 sync
+    recv
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/srv1 [0-9]*/[0-9]*/[0-9]*/-1/[0-9]* 400 .* - - CH-- .* .* \"GET /c3 HTTP/1\\.1\""
+} -start
+
+syslog S2 -level info {
+    recv alert
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+    barrier b0 sync
+
+    recv
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CC-- .* .* \"GET /c5 HTTP/1\\.1\""
+    barrier b2 sync
+    recv
+    expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c6 HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+    global
+        # WT: limit false-positives causing "HTTP header incomplete" due to
+        # idle server connections being randomly used and randomly expiring
+        # under us.
+        tune.idle-pool.shared off
+
+    defaults
+        mode http
+        option abortonclose
+        retries 1
+        timeout client 10s
+        timeout server 10s
+        timeout connect 100ms
+        timeout queue 5s
+
+    frontend fe1
+        option httplog
+        log ${S1_addr}:${S1_port} local0 debug err
+        bind "fd@${fe1}"
+        use_backend be1_1 if { path /c1 }
+        use_backend be1_2 if { path /c2 }
+
+    frontend fe2
+        option httplog
+        log ${S1_addr}:${S1_port} local0 debug err
+        bind "fd@${fe2}"
+        use_backend be2
+
+    backend be1_1
+        server srv1 ${s1_addr}:${s1_port}
+
+    backend be1_2
+        timeout connect 1s
+        retries 10
+        server srv1 ${s1_addr}:${s1_port}
+
+    backend be2
+        server srv1 ${s2_addr}:${s2_port} maxconn 1
+
+    backend check
+        server srv1 ${s1_addr}:${s1_port} check
+        log ${S1_addr}:${S1_port} local0 debug alert
+} -start
+
+
+haproxy h2 -conf {
+    global
+        # WT: limit false-positives causing "HTTP header incomplete" due to
+        # idle server connections being randomly used and randomly expiring
+        # under us.
+        tune.idle-pool.shared off
+
+    defaults
+        mode http
+        retries 1
+        timeout client 10s
+        timeout server 10s
+        timeout connect 100ms
+        timeout queue 5s
+
+    frontend fe1
+        option httplog
+        log ${S2_addr}:${S2_port} local0 debug err
+        bind "fd@${fe1}"
+        use_backend be1
+
+    backend be1
+        option abortonclose
+        server srv1 ${s1_addr}:${s1_port}
+
+    defaults
+        mode http
+        option abortonclose
+        retries 1
+        timeout client 10s
+        timeout server 10s
+        timeout connect 100ms
+        timeout queue 5s
+
+    frontend fe2
+        option httplog
+        log ${S2_addr}:${S2_port} local0 debug err
+        bind "fd@${fe2}"
+        use_backend be2
+
+    backend be2
+        no option abortonclose
+        server srv1 ${s1_addr}:${s1_port}
+
+    backend check
+        server srv1 ${s1_addr}:${s1_port} check
+        log ${S2_addr}:${S2_port} local0 debug alert
+} -start
+
+
+# Wait until s1 is detected as DOWN
+barrier b0 sync
+
+# No server, wait for all connection retries : SC--
+client c1 -connect ${h1_fe1_sock} {
+    txreq -url /c1
+    rxresp
+    expect resp.status == 503
+} -run
+
+# Wait for the c1 log entry
+barrier b2 sync
+
+# No server, abort during connection retries : CC--
+client c2 -connect ${h1_fe1_sock} {
+    txreq -url /c2
+} -run
+
+# Wait for the c2 log entry
+barrier b2 sync
+
+# server with maxconn=1, abort while waiting for the server reply : CH--
+client c3 -connect ${h1_fe2_sock} {
+    txreq -url /c3
+
+    # Wait for the c4 log entry
+    barrier b2 sync
+} -start
+
+# server with maxconn=1, abort while waiting in the queue (c3 still attached) : CQ--
+client c4 -connect ${h1_fe2_sock} {
+    # Wait until s2 has received the c3 request
+    barrier b1 sync
+
+    txreq -url /c4
+    delay .2
+} -run
+
+client c3 -wait
+
+# unlock s2
+barrier b3 sync
+
+syslog S1 -wait
+
+
+# No server, abort during connection retries : CC--
+# abortonclose on the backend only
+client c5 -connect ${h2_fe1_sock} {
+    txreq -url /c5
+} -run
+
+# Wait for the c5 log entry
+barrier b2 sync
+
+# No server, wait for all connection retries : SC--
+# abortonclose set in the defaults section but disabled by the backend
+client c6 -connect ${h2_fe2_sock} {
+    txreq -url /c6
+    rxresp
+    expect resp.status == 503
+} -run
+
+
+syslog S2 -wait
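
As background for the test above: "option abortonclose" makes HAProxy abort a pending request when the client closes its side of the connection instead of keeping it queued or waiting for the server, which is what produces the CC--/CQ--/CH-- termination codes checked in the syslog expectations, while the SC-- cases correspond to requests that simply wait out all connection retries. The snippet below is only a minimal illustrative sketch of the per-section usage pattern exercised by the test (enable in defaults, opt out per backend); the backend names and addresses are placeholders, not part of this commit:

    defaults
        mode http
        timeout client  10s
        timeout server  10s
        timeout connect 1s
        option abortonclose            # abort queued or unanswered requests when the client closes first

    backend be_app                     # hypothetical backend: inherits abortonclose from the defaults
        server srv1 192.0.2.10:8080 maxconn 1

    backend be_legacy                  # hypothetical backend: opts back out of the inherited option
        no option abortonclose
        server srv1 192.0.2.11:8080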