Diffstat
l---------  reg-tests/http-messaging/common.pem                             1
-rw-r--r--  reg-tests/http-messaging/h1_host_normalization.vtc            762
-rw-r--r--  reg-tests/http-messaging/h1_to_h1.vtc                         301
-rw-r--r--  reg-tests/http-messaging/h2_desync_attacks.vtc                167
-rw-r--r--  reg-tests/http-messaging/h2_to_h1.vtc                         324
-rw-r--r--  reg-tests/http-messaging/http_abortonclose.vtc                226
-rw-r--r--  reg-tests/http-messaging/http_bodyless_response.vtc           128
-rw-r--r--  reg-tests/http-messaging/http_bodyless_spliced_response.vtc    89
-rw-r--r--  reg-tests/http-messaging/http_msg_full_on_eom.vtc              62
-rw-r--r--  reg-tests/http-messaging/http_request_buffer.vtc              135
-rw-r--r--  reg-tests/http-messaging/http_splicing.vtc                     77
-rw-r--r--  reg-tests/http-messaging/http_splicing_chunk.vtc               74
-rw-r--r--  reg-tests/http-messaging/http_transfer_encoding.vtc           202
-rw-r--r--  reg-tests/http-messaging/http_wait_for_body.vtc               171
-rw-r--r--  reg-tests/http-messaging/protocol_upgrade.vtc                 228
-rw-r--r--  reg-tests/http-messaging/scheme_based_normalize.vtc           125
-rw-r--r--  reg-tests/http-messaging/srv_ws.vtc                           180
-rw-r--r--  reg-tests/http-messaging/truncated.vtc                        101
-rw-r--r--  reg-tests/http-messaging/websocket.vtc                        211
19 files changed, 3564 insertions, 0 deletions
diff --git a/reg-tests/http-messaging/common.pem b/reg-tests/http-messaging/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/http-messaging/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/http-messaging/h1_host_normalization.vtc b/reg-tests/http-messaging/h1_host_normalization.vtc
new file mode 100644
index 0000000..48174b8
--- /dev/null
+++ b/reg-tests/http-messaging/h1_host_normalization.vtc
@@ -0,0 +1,762 @@
+varnishtest "H1 authority validation and host normalizarion based on the scheme (rfc3982 6.3.2) or the method (connect)"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+
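+# Note on the patterns below: the frontend captures the Host header and logs
+# it through %hr, which prints captured request headers between curly braces,
+# so each expectation matches the normalized (or untouched) Host value
+# inside {...}.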
+syslog S1 -level info {
+ # C1
+ recv
+ expect ~ "^.* uri: GET http://toto:poue@hostname/c1 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C2
+ recv
+ expect ~ "^.* uri: GET http://hostname:8080/c2 HTTP/1.1; host: {hostname:8080}$"
+ barrier b1 sync
+
+ # C3
+ recv
+ expect ~ "^.* uri: GET https://hostname/c3 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C4
+ recv
+ expect ~ "^.* uri: GET https://hostname:80/c4 HTTP/1.1; host: {hostname:80}$"
+ barrier b1 sync
+
+ # C5
+ recv
+ expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C6
+ recv
+ expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C7
+ recv
+ expect ~ "^.* uri: CONNECT hostname:8443 HTTP/1.1; host: {hostname:8443}$"
+ barrier b1 sync
+
+ # C8
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C9
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C10
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C11
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C12
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C13
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C14
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C15
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C16
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C17
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C18
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C19
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C20
+ recv
+ expect ~ "^.* uri: GET http://hostname/c20 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C21
+ recv
+ expect ~ "^.* uri: GET https://hostname/c21 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C22
+ recv
+ expect ~ "^.* uri: GET http://hostname/c22 HTTP/1.1; host: {hostname:80}$"
+ barrier b1 sync
+
+ # C23
+ recv
+ expect ~ "^.* uri: GET https://hostname/c23 HTTP/1.1; host: {hostname:443}$"
+ barrier b1 sync
+
+ # C24
+ recv
+ expect ~ "^.* uri: GET http://hostname/c24 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C25
+ recv
+ expect ~ "^.* uri: GET https://hostname/c25 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C26
+ recv
+ expect ~ "^.* uri: GET http://hostname/c26 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C27
+ recv
+ expect ~ "^.* uri: GET https://hostname/c27 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C28
+ recv
+ expect ~ "^.* uri: GET http://hostname/c28 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C29
+ recv
+ expect ~ "^.* uri: GET http://hostname/c29 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C30
+ recv
+ expect ~ "^.* uri: GET https://hostname/c30 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C31
+ recv
+ expect ~ "^.* uri: GET https://hostname/c31 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C32
+ recv
+ expect ~ "^.* uri: GET http:// HTTP/1.1; host: {}$"
+ barrier b1 sync
+
+ # C33
+ recv
+ expect ~ "^.* uri: GET https:// HTTP/1.1; host: {}$"
+ barrier b1 sync
+
+ # C34
+ recv
+ expect ~ "^.* uri: GET http:// HTTP/1.1; host: {}$"
+ barrier b1 sync
+
+ # C35
+ recv
+ expect ~ "^.* uri: GET https:// HTTP/1.1; host: {}$"
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-request capture req.hdr(host) len 512
+ log-format "uri: %r; host: %hr"
+ log ${S1_addr}:${S1_port} len 2048 local0 debug err
+
+ http-request return status 200
+} -start
+
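+# Summary of the behaviour exercised below: the port is dropped from the
+# authority and from the host header only when it is empty or matches the
+# scheme's default (80 for "http", 443 for "https"); CONNECT requests are
+# normalized the same way based on the port found in the authority. Any other
+# port is kept as-is, and authority/host mismatches are rejected with a 400.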
+# default port 80 with http scheme => should be normalized
+# Be sure the userinfo part is skipped
+client c1 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://toto:poue@hostname:80/c1" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port 8080 with http scheme => no normalization
+client c2 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:8080/c2" \
+ -hdr "host: hostname:8080"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# default port 443 with https scheme => should be normalized
+client c3 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:443/c3" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port 80 with https scheme => no normalization
+client c4 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:80/c4" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT on port 80 => should be normalized
+client c5 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c5 -connect ${h1_fe_sock} {
+
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c5 -connect ${h1_fe_sock} {
+
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT on port 443 => should be normalized
+client c6 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:443" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c6 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:443" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c6 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:443" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT on a non-default port => no normalization
+client c7 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:8443" \
+ -hdr "host: hostname:8443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# host mismatch => error
+client c8 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname1/" \
+ -hdr "host: hostname2"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port mismatch => error
+client c9 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:80/" \
+ -hdr "host: hostname:81"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no host port with a non-default port in abs-uri => error
+client c10 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:8080/" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# non-default host port with a default port in the abs-uri => error
+client c11 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname/" \
+ -hdr "host: hostname:81"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# mismatch between host headers => error
+client c12 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname1/" \
+ -hdr "host: hostname1" \
+ -hdr "host: hostname2"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# mismatch between host headers but with a normalization => error
+client c13 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname1/" \
+ -hdr "host: hostname1:80" \
+ -hdr "host: hostname1"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT authority without a port => error
+client c14 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# host mismatch with CONNECT => error
+client c15 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname1:80" \
+ -hdr "host: hostname2:80"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port mismatch with CONNECT => error
+client c16 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no host port with non-default port in CONNECT authority => error
+client c17 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:8080" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no authority => error
+client c18 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "/" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# empty port in the CONNECT authority => error
+client c19 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# default port 80 with http scheme but no port for host value => should be normalized
+client c20 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:80/c20" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# default port 443 with https scheme but no port for host value => should be normalized
+client c21 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:443/c21" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# http scheme, no port for the authority but default port for host value => no normalization
+client c22 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname/c22" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, no port for the authority but default port for host value => no normalization
+client c23 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname/c23" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# http scheme, empty port for the authority and no port for host value => should be normalized
+client c24 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:/c24" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, empty port for the authority and no port for host value => should be normalized
+client c25 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:/c25" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# http scheme, no port for the authority and empty port for host value => no normalization
+client c26 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname/c26" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, no port for the authority and empty port for host value => no normalization
+client c27 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname/c27" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# http scheme, default port for the authority and empty port for host value => should be normalized
+client c28 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:80/c28" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# http scheme, empty port for the authority and default port for host value => should be normalized
+client c29 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:/c29" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, default port for the authority and empty port for host value => should be normalized
+client c30 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:443/c30" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, empty port for the authority and default port for host value => should be normalized
+client c31 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:/c31" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Strange cases
+client c32 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://:" \
+ -hdr "host: :80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+client c33 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://:" \
+ -hdr "host: :443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Strange cases
+client c34 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://:" \
+ -hdr "host: :"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+client c35 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://:" \
+ -hdr "host: :"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+syslog S1 -wait
diff --git a/reg-tests/http-messaging/h1_to_h1.vtc b/reg-tests/http-messaging/h1_to_h1.vtc
new file mode 100644
index 0000000..67aba14
--- /dev/null
+++ b/reg-tests/http-messaging/h1_to_h1.vtc
@@ -0,0 +1,301 @@
+varnishtest "HTTP request tests: H1 to H1 (HTX mode supported only for HAProxy >= 1.9)"
+
+# Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
+
+feature ignore_unknown_macro
+
+server s1 {
+ ##
+ ## Handle GET requests
+ ##
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ rxreq
+ expect req.bodylen == 38
+ expect req.body == "this must be delivered, like it or not"
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 4"
+
+ accept
+
+ ##
+ ## Handle HEAD requests
+ ##
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ accept
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ accept
+
+ rxreq
+ expect req.bodylen == 38
+ expect req.body == "this must be delivered, like it or not"
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ accept
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 4"
+
+ accept
+
+ ##
+ ## Handle POST requests
+ ##
+ # POST request without body
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ # POST request without body
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ # POST request with a body
+ rxreq
+ expect req.bodylen == 12
+ expect req.body == "this is sent"
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ # POST request without body
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 4"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen feh1
+ bind "fd@${feh1}"
+ #bind "fd@${feh2}" proto h2
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# GET requests
+client c1h1 -connect ${h1_feh1_sock} {
+ # first request is valid
+ txreq \
+ -req "GET" \
+ -url "/test1.html"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 1"
+
+ # second request is valid and advertises C-L:0
+ txreq \
+ -req "GET" \
+ -url "/test2.html" \
+ -hdr "content-length: 0"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 2"
+
+ # third request sends a body with a GET
+ txreq \
+ -req "GET" \
+ -url "/test3.html" \
+ -body "this must be delivered, like it or not"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 3"
+
+ # fourth request is valid and advertises C-L:0, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which must be
+ # dropped.
+ txreq \
+ -req "GET" \
+ -url "/test4.html" \
+ -hdr "content-length: 0" \
+ -hdr "connection: close"
+ # "this is not sent"
+ sendhex "74787973207973206E6F742073656E740D0A0D0A"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 4"
+
+ # the connection is expected to be closed and no more response must
+ # arrive here.
+ expect_close
+} -run
+
+# HEAD requests
+# Note: for now they fail with varnishtest, which expects to read the amount
+# of data advertised by the content-length of the response.
+client c2h1 -connect ${h1_feh1_sock} {
+ # first request is valid
+ txreq \
+ -req "HEAD" \
+ -url "/test11.html"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # second request is valid and advertises C-L:0
+ txreq \
+ -req "HEAD" \
+ -url "/test12.html" \
+ -hdr "content-length: 0"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # third request sends a body with a HEAD
+ txreq \
+ -req "HEAD" \
+ -url "/test13.html" \
+ -body "this must be delivered, like it or not"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # fourth request is valid and advertises C-L:0, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which must be
+ # dropped.
+ txreq \
+ -req "HEAD" \
+ -url "/test14.html" \
+ -hdr "content-length: 0" \
+ -hdr "connection: close"
+ # "this is not sent"
+ sendhex "74787973207973206E6F742073656E740D0A0D0A"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # the connection is expected to be closed and no more response must
+ # arrive here.
+ expect_close
+} -run
+
+client c3h1 -connect ${h1_feh1_sock} {
+ # first request is valid
+ txreq \
+ -req "POST" \
+ -url "/test21.html"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 1"
+
+ # second request is valid and advertises C-L:0
+ txreq \
+ -req "POST" \
+ -url "/test22.html" \
+ -hdr "content-length: 0"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 2"
+
+ # third request is valid and advertises (and sends) some contents
+ txreq \
+ -req "POST" \
+ -url "/test23.html" \
+ -body "this is sent"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 3"
+
+ # fourth request is valid and advertises C-L:0, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which must be
+ # dropped.
+ txreq \
+ -req "POST" \
+ -url "/test24.html" \
+ -hdr "content-length: 0" \
+ -hdr "connection: close"
+ # "this is not sent"
+ sendhex "74787973207973206E6F742073656E740D0A0D0A"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 4"
+
+ # the connection is expected to be closed and no more response must
+ # arrive here.
+ expect_close
+} -run
+
+client c4h1 -connect ${h1_feh1_sock} {
+ # this request is invalid and advertises an invalid C-L ending with an
+ # empty value, which results in a stream error.
+ txreq \
+ -req "GET" \
+ -url "/test31.html" \
+ -hdr "content-length: 0," \
+ -hdr "connection: close"
+ rxresp
+ expect resp.status == 400
+ expect_close
+} -run
+
+client c5h1 -connect ${h1_feh1_sock} {
+ # this request is invalid and advertises an empty C-L, which results
+ # in a stream error.
+ txreq \
+ -req "GET" \
+ -url "/test41.html" \
+ -hdr "content-length:" \
+ -hdr "connection: close"
+ rxresp
+ expect resp.status == 400
+ expect_close
+} -run
diff --git a/reg-tests/http-messaging/h2_desync_attacks.vtc b/reg-tests/http-messaging/h2_desync_attacks.vtc
new file mode 100644
index 0000000..112bc60
--- /dev/null
+++ b/reg-tests/http-messaging/h2_desync_attacks.vtc
@@ -0,0 +1,167 @@
+# This test ensures that h2 requests with invalid pseudo-headers are properly
+# rejected. Also, the host header must be ignored if authority is present. This
+# is necessary to avoid http/2 desync attacks through haproxy as described here
+# https://portswigger.net/research/http2
+
+varnishtest "h2 desync attacks"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.host == "hostname"
+ txresp
+} -start
+
+# haproxy frontend
+haproxy hap -conf {
+ defaults
+ mode http
+
+ listen feSrvH1
+ bind "fd@${feSrvH1}"
+ http-request return status 200
+
+ listen feSrvH2
+ bind "fd@${feSrvH2}" proto h2
+ http-request return status 200
+
+ listen fe1
+ bind "fd@${fe1}" proto h2
+ server srv-hapSrv "${hap_feSrvH1_addr}:${hap_feSrvH1_port}"
+
+ listen fe2
+ bind "fd@${fe2}" proto h2
+ server srv-hapSrv "${hap_feSrvH2_addr}:${hap_feSrvH2_port}" proto h2
+
+ listen fe3
+ bind "fd@${fe3}" proto h2
+ server s1 "${s1_addr}:${s1_port}"
+} -start
+
+# valid request
+client c1 -connect ${hap_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "/"
+ rxresp
+ expect resp.status == 200
+ } -run
+} -run
+
+# valid request
+client c2 -connect ${hap_fe2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "/"
+ rxresp
+ expect resp.status == 200
+ } -run
+} -run
+
+# invalid path
+client c3-path -connect ${hap_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "hello-world"
+ rxrst
+ } -run
+} -run
+
+# invalid scheme
+client c4-scheme -connect ${hap_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http://localhost/?" \
+ -url "/"
+ rxresp
+ expect resp.status == 400
+ } -run
+} -run
+
+# invalid method
+client c5-method -connect ${hap_fe2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET?" \
+ -scheme "http" \
+ -url "/"
+ rxresp
+ expect resp.status == 400
+ } -run
+} -run
+
+# different authority and host headers
+# in this case, host should be ignored in favor of the authority
+client c6-host-authority -connect ${hap_fe3_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "hostname" \
+ -hdr "host" "other_host"
+ } -run
+} -run
+
+server s1 -wait
diff --git a/reg-tests/http-messaging/h2_to_h1.vtc b/reg-tests/http-messaging/h2_to_h1.vtc
new file mode 100644
index 0000000..637b664
--- /dev/null
+++ b/reg-tests/http-messaging/h2_to_h1.vtc
@@ -0,0 +1,324 @@
+varnishtest "HTTP request tests: H2 to H1 (HTX and legacy mode)"
+
+# Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
+
+feature ignore_unknown_macro
+
+# synchronize requests between streams
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+barrier b3 cond 2 -cyclic
+barrier b4 cond 2 -cyclic
+barrier b5 cond 2 -cyclic
+barrier b6 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ barrier b2 sync
+ rxreq
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ barrier b3 sync
+ rxreq
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ barrier b4 sync
+ # the next request is never received
+
+ barrier b5 sync
+ # the next request is never received
+
+ barrier b6 sync
+ # the next request is never received
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ #log stdout format raw daemon
+ mode http
+ option http-buffer-request
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen feh1
+ bind "fd@${feh1}"
+ bind "fd@${feh2}" proto h2
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ # first request is valid
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test1.html"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 1"
+ } -run
+
+ # second request is valid and advertises C-L:0
+ stream 3 {
+ barrier b2 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test2.html" \
+ -hdr "content-length" "0"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 2"
+ } -run
+
+ # third request sends a body with a GET
+ stream 5 {
+ barrier b3 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test3.html" \
+ -nostrend \
+ -body "this must be delivered, like it or not"
+ rxwinup
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 3"
+ } -run
+
+ # fourth request is valid, advertises C-L:2, and then sends more data
+ # than advertised, which causes a stream error of type PROTOCOL_ERROR.
+ stream 7 {
+ barrier b4 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test4.html" \
+ -hdr "content-length" "2" \
+ -nostrend
+ txdata -data "this is sent and ignored"
+ rxrst
+ } -run
+
+ # fifth request is invalid and advertises an invalid C-L ending with an
+ # empty value, which results in a stream error.
+ stream 9 {
+ barrier b5 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test5.html" \
+ -hdr "content-length" "0," \
+ -nostrend
+ rxrst
+ } -run
+
+ # sixth request is invalid and advertises an empty C-L, which results
+ # in a stream error.
+ stream 11 {
+ barrier b6 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test6.html" \
+ -hdr "content-length" "" \
+ -nostrend
+ rxrst
+ } -run
+} -run
+
+# HEAD requests : don't work well yet
+#client c2h2 -connect ${h1_feh2_sock} {
+# txpri
+# stream 0 {
+# txsettings
+# rxsettings
+# txsettings -ack
+# rxsettings
+# expect settings.ack == true
+# } -run
+#
+# # first request is valid
+# stream 1 {
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test11.html"
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#
+# # second request is valid and advertises C-L:0
+# stream 3 {
+# barrier b2 sync
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test12.html" \
+# -hdr "content-length" "0"
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#
+# # third request sends a body with a HEAD
+# stream 5 {
+# barrier b3 sync
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test13.html" \
+# -nostrend \
+# -body "this must be delivered, like it or not"
+# rxwinup
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#
+# # fourth request is valid and advertises C-L:0, and close, and is
+# # followed by a string "this is not sent\r\n\r\n" which must be
+# # dropped.
+# stream 7 {
+# barrier b4 sync
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test14.html" \
+# -hdr "content-length" "0" \
+# -nostrend
+# txdata -data "this is sent and ignored"
+# rxwinup
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#} -run
+
+# POST requests
+client c3h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ # first request is valid
+ stream 1 {
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test21.html"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 1"
+ } -run
+
+ # second request is valid and advertises C-L:0
+ stream 3 {
+ barrier b2 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test22.html" \
+ -hdr "content-length" "0"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 2"
+ } -run
+
+ # third request sends a body with a POST
+ stream 5 {
+ barrier b3 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test23.html" \
+ -nostrend \
+ -body "this must be delivered, like it or not"
+ rxwinup
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 3"
+ } -run
+
+ # fourth request is valid, advertises C-L:2, and then sends more data
+ # than advertised, which results in a stream error.
+ stream 7 {
+ barrier b4 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test24.html" \
+ -hdr "content-length" "2" \
+ -nostrend
+ txdata -data "this is sent and ignored"
+ rxrst
+ } -run
+
+ # fifth request is invalid and advertises invalid C-L ending with an
+ # empty value, which results in a stream error.
+ stream 9 {
+ barrier b5 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test25.html" \
+ -hdr "content-length" "0," \
+ -nostrend
+ rxrst
+ } -run
+
+ # sixth request is invalid and advertises an empty C-L, which results
+ # in a stream error.
+ stream 11 {
+ barrier b6 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test26.html" \
+ -hdr "content-length" "" \
+ -nostrend
+ rxrst
+ } -run
+} -run
diff --git a/reg-tests/http-messaging/http_abortonclose.vtc b/reg-tests/http-messaging/http_abortonclose.vtc
new file mode 100644
index 0000000..ea57f3d
--- /dev/null
+++ b/reg-tests/http-messaging/http_abortonclose.vtc
@@ -0,0 +1,226 @@
+varnishtest "A test for the abortonclose option (H1 only)"
+feature ignore_unknown_macro
+
+# NOTE: This test may fail if too many vtests are running in parallel because
+# the port reserved for the closed s1 server may be reused by another vtest
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.2-dev0)'"
+#REGTEST_TYPE=slow
+
+# b0 : Wait until s1 is detected as DOWN, to be sure it is stopped
+# b1 : Don't send /c4 before /c3 was received by s2 server
+# b2 : Used to receive syslog messages in the right order
+# b3 : finish c3 before s2
+
+barrier b0 cond 3 -cyclic
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+barrier b3 cond 2 -cyclic
+
+server s1 {
+} -start
+server s1 -break
+
+server s2 {
+ rxreq
+
+ # unlock c4
+ barrier b1 sync
+
+ # wait end of c3
+ barrier b3 sync
+
+ expect_close
+} -start
+
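+# The "SC--", "CC--", "CQ--" and "CH--" fields checked below are HAProxy's
+# session state at disconnection (see the log-format documentation): the first
+# letter tells what ended the session (S: server-side failure/refusal,
+# C: client abort), the second one the state it was in (C: waiting for the
+# server connection, Q: waiting in the queue, H: waiting for response headers).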
+syslog S1 -level info {
+ recv alert
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+ barrier b0 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c1 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CC-- .* .* \"GET /c2 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/<NOSRV> [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CQ-- .* .* \"GET /c4 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/srv1 [0-9]*/[0-9]*/[0-9]*/-1/[0-9]* 400 .* - - CH-- .* .* \"GET /c3 HTTP/1\\.1\""
+} -start
+
+syslog S2 -level info {
+ recv alert
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+ barrier b0 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CC-- .* .* \"GET /c5 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c6 HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ option abortonclose
+ retries 1
+ timeout client 10s
+ timeout server 10s
+ timeout connect 100ms
+ timeout queue 5s
+
+ frontend fe1
+ option httplog
+ log ${S1_addr}:${S1_port} local0 debug err
+ bind "fd@${fe1}"
+ use_backend be1_1 if { path /c1 }
+ use_backend be1_2 if { path /c2 }
+
+ frontend fe2
+ option httplog
+ log ${S1_addr}:${S1_port} local0 debug err
+ bind "fd@${fe2}"
+ use_backend be2
+
+ backend be1_1
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend be1_2
+ timeout connect 1s
+ retries 10
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend be2
+ server srv1 ${s2_addr}:${s2_port} maxconn 1
+
+ backend check
+ server srv1 ${s1_addr}:${s1_port} check
+ log ${S1_addr}:${S1_port} local0 debug alert
+} -start
+
+
+haproxy h2 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ retries 1
+ timeout client 10s
+ timeout server 10s
+ timeout connect 100ms
+ timeout queue 5s
+
+ frontend fe1
+ option httplog
+ log ${S2_addr}:${S2_port} local0 debug err
+ bind "fd@${fe1}"
+ use_backend be1
+
+ backend be1
+ option abortonclose
+ server srv1 ${s1_addr}:${s1_port}
+
+ defaults
+ mode http
+ option abortonclose
+ retries 1
+ timeout client 10s
+ timeout server 10s
+ timeout connect 100ms
+ timeout queue 5s
+
+ frontend fe2
+ option httplog
+ log ${S2_addr}:${S2_port} local0 debug err
+ bind "fd@${fe2}"
+ use_backend be2
+
+ backend be2
+ no option abortonclose
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend check
+ server srv1 ${s1_addr}:${s1_port} check
+ log ${S2_addr}:${S2_port} local0 debug alert
+} -start
+
+
+# Wait until s1 is detected as DOWN
+barrier b0 sync
+
+# No server, wait for all connection retries : SC--
+client c1 -connect ${h1_fe1_sock} {
+ txreq -url /c1
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Wait c1 log entry
+barrier b2 sync
+
+# No server, abort during connection retries : CC--
+client c2 -connect ${h1_fe1_sock} {
+ txreq -url /c2
+} -run
+
+# Wait c2 log entry
+barrier b2 sync
+
+# server with maxconn=1, abort while waiting for the server reply : CH--
+client c3 -connect ${h1_fe2_sock} {
+ txreq -url /c3
+
+ # Wait c4 log entry
+ barrier b2 sync
+} -start
+
+# server with maxconn=1, abort waiting in the queue (c3 still attached) : CQ--
+client c4 -connect ${h1_fe2_sock} {
+ # Wait s2 receives c3 request
+ barrier b1 sync
+
+ txreq -url /c4
+ delay .2
+} -run
+
+client c3 -wait
+
+# unlock s2
+barrier b3 sync
+
+syslog S1 -wait
+
+
+# No server, abort during connection retries : CC--
+# abortonclose on the backend only
+client c5 -connect ${h2_fe1_sock} {
+ txreq -url /c5
+} -run
+
+# Wait c5 log entry
+barrier b2 sync
+
+# No server, wait for all connection retries : SC--
+# abortonclose in the defaults section but disabled by the backend
+client c6 -connect ${h2_fe2_sock} {
+ txreq -url /c6
+ rxresp
+ expect resp.status == 503
+} -run
+
+
+syslog S2 -wait
diff --git a/reg-tests/http-messaging/http_bodyless_response.vtc b/reg-tests/http-messaging/http_bodyless_response.vtc
new file mode 100644
index 0000000..6b53bc4
--- /dev/null
+++ b/reg-tests/http-messaging/http_bodyless_response.vtc
@@ -0,0 +1,128 @@
+varnishtest "A test to be sure payload is skipped for bodyless responses"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -body "skipped data"
+
+ rxreq
+ txresp \
+ -status 200 \
+ -bodylen 50000
+
+ rxreq
+ txresp \
+ -status 200 \
+ -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 15
+ chunkedlen 1024
+ chunkedlen 4048
+ chunkedlen 50000
+ chunkedlen 0
+
+ rxreq
+ txresp \
+ -status 200 \
+ -body "last response"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ # Rewrite the method to be sure to get the response payload
+ # on the server side
+ http-request set-method GET
+ server s1 ${s1_addr}:${s1_port}
+
+ listen int
+ bind "fd@${int}" proto h2
+ # Rewrite the method to be sure to get the response payload
+ # on the server side
+ http-request set-method GET
+ server s1 ${s1_addr}:${s1_port}
+ #server s1 ${h1_fe1_addr}:${h1_fe1_port}
+
+ listen fe2
+ bind "fd@${fe2}"
+ server s1 ${h1_int_addr}:${h1_int_port} proto h2
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq \
+ -req "HEAD" \
+ -url "/req1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req2"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req3"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # The last one has a body and validates that the connection was not closed
+ # unexpectedly and that no payload was received for the previous requests
+ txreq \
+ -req "GET" \
+ -url "/req4"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "last response"
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+ txreq \
+ -req "HEAD" \
+ -url "/req1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req2"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req3"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # The last one has a body and validates that the connection was not closed
+ # unexpectedly and that no payload was received for the previous requests
+ txreq \
+ -req "GET" \
+ -url "/req4"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "last response"
+} -run
diff --git a/reg-tests/http-messaging/http_bodyless_spliced_response.vtc b/reg-tests/http-messaging/http_bodyless_spliced_response.vtc
new file mode 100644
index 0000000..73916f2
--- /dev/null
+++ b/reg-tests/http-messaging/http_bodyless_spliced_response.vtc
@@ -0,0 +1,89 @@
+varnishtest "A test to be sure payload is skipped for bodyless responses when splicing is used"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(LINUX_SPLICE)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(FAST-FORWARD)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(SPLICE)'"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -body "skipped data"
+
+ rxreq
+ txresp \
+ -status 200 \
+ -bodylen 50000
+
+ rxreq
+ txresp \
+ -status 200 \
+ -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 15
+ chunkedlen 1024
+ chunkedlen 4048
+ chunkedlen 50000
+ chunkedlen 0
+
+ rxreq
+ txresp \
+ -status 200 \
+ -body "last response"
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ # Rewrite the method to be sure to get the response payload
+ # on the server side
+ http-request set-method GET
+ option splice-response
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq \
+ -req "HEAD" \
+ -url "/req1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req2"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req3"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # The last one has a body and validates that the connection was not closed
+ # unexpectedly and that no payload was received for the previous requests
+ txreq \
+ -req "GET" \
+ -url "/req4"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "last response"
+} -run
diff --git a/reg-tests/http-messaging/http_msg_full_on_eom.vtc b/reg-tests/http-messaging/http_msg_full_on_eom.vtc
new file mode 100644
index 0000000..2edba7d
--- /dev/null
+++ b/reg-tests/http-messaging/http_msg_full_on_eom.vtc
@@ -0,0 +1,62 @@
+varnishtest "cannot add the HTX EOM block because the buffer is full"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_VERSION_BELOW=2.4
+#REGTEST_TYPE=devel
+
+# This test checks that an HTTP message is properly processed when the HTX EOM
+# block could not be added to the HTX message during parsing because the buffer
+# was full. Some space must be released in the buffer to make it possible, which
+# requires an extra pass in the H1 multiplexer. Here, we must be sure the mux is
+# called again while there is no more incoming data.
+
+server s1 {
+ rxreq
+ expect req.bodylen == 15200
+ txresp -bodylen 15220
+} -start
+
+syslog S -level info {
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.bufsize 16384
+ tune.maxrewrite 1024
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ tcp-response inspect-delay 100ms
+ tcp-response content accept if { res.len gt 15272 }
+ tcp-response content reject
+
+ http-response deny if { internal.htx.has_eom -m bool } or { internal.htx.free_data gt 1024 }
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend fe1
+ option httplog
+ option http-buffer-request
+ log ${S_addr}:${S_port} local0 debug err
+ bind "fd@${fe1}"
+ http-request deny if ! { req.body_len eq 15200 } or { internal.htx.has_eom -m bool } or { internal.htx.free_data gt 1024 }
+ use_backend be1
+} -start
+
+haproxy h1 -cli {
+ send "trace h1 sink stderr; trace h1 level developer; trace h1 verbosity complete; trace h1 start now"
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -bodylen 15200
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 15220
+} -run
diff --git a/reg-tests/http-messaging/http_request_buffer.vtc b/reg-tests/http-messaging/http_request_buffer.vtc
new file mode 100644
index 0000000..302db4a
--- /dev/null
+++ b/reg-tests/http-messaging/http_request_buffer.vtc
@@ -0,0 +1,135 @@
+varnishtest "A test for http-request-buffer option"
+feature ignore_unknown_macro
+
+
+# This test checks the HTTP request buffering feature.
+# We run one server s1 which can serve only one client (no -repeat argument here).
+# The c1 client sends an incomplete request which, thanks to "http-buffer-request",
+# is never transferred to the s1 server. If it were, the c2 client could not
+# connect to s1 and this test would fail.
+
+barrier b1 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ expect req.bodylen == 257
+ txresp
+
+ accept
+
+ rxreq
+ expect req.bodylen == 2
+ txresp
+} -start
+
+syslog S -level info {
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 fe1/<NOSRV> .* 408 .* - - cR-- .* .* \"GET /this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url HTTP/1\\.1\""
+ barrier b1 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
+ barrier b1 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
+ barrier b1 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* 400 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client 100
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend fe1
+ option httplog
+ option http-buffer-request
+ log ${S_addr}:${S_port} local0 debug err
+ bind "fd@${fe1}"
+ use_backend be1
+
+ frontend fe2
+ timeout client 10s
+ option httplog
+ option http-buffer-request
+ log ${S_addr}:${S_port} local0 debug err
+ bind "fd@${fe2}"
+ use_backend be1
+} -start
+
+# 1 byte of the payload is missing.
+# ==> The request must time out and be answered with a 408 response
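+# (Content-Length announces 209 bytes but the eight alphabet lines below only
+# add up to 8 * 26 = 208 bytes, so fe1's "timeout client 100" fires.)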
+client c1 -connect ${h1_fe1_sock} {
+ send "GET"
+ send " "
+ send "/this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send " HTT"
+ send "P/1.1"
+ send "\r"
+ send "\n"
+ send "Content-Length: 209\r\n\r\n"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ rxresp
+ expect resp.status == 408
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Payload is fully sent
+# ==> Request must be sent to the server. A 200 must be received
+client c2 -connect ${h1_fe1_sock} {
+ txreq -bodylen 257
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Payload is fully sent in 2 steps (with a small delay, smaller than the client
+# timeout) and split on a chunk size.
+# ==> Request must be sent to the server. A 200 must be received
+client c3 -connect ${h1_fe2_sock} {
+ send "POST /1 HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
+ delay 0.01
+ send "\r\n1\r\n0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Last CRLF of the request payload is missing but payload is sent in 2 steps
+# (with a small delay, smaller than the client timeout) and split on a chunk
+# size. The client aborts before sending the last CRLF.
+# ==> Request must be handled as an error with 'CR--' termination state.
+client c4 -connect ${h1_fe2_sock} {
+ send "POST /2 HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
+ delay 0.01
+ send "\r\n1\r\n0\r\n"
+} -run
+
+syslog S -wait
diff --git a/reg-tests/http-messaging/http_splicing.vtc b/reg-tests/http-messaging/http_splicing.vtc
new file mode 100644
index 0000000..e86680b
--- /dev/null
+++ b/reg-tests/http-messaging/http_splicing.vtc
@@ -0,0 +1,77 @@
+# This reg-test checks splicing support for the H1 multiplexer
+
+varnishtest "A test to validate h1 splicing support"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(LINUX_SPLICE)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(FAST-FORWARD)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(SPLICE)'"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+server s1 {
+ rxreq
+ expect req.http.content-length == "1048576"
+ expect req.bodylen == 1048576
+ txresp -status 200 -bodylen 1048576
+} -start
+
+server s2 {
+ rxreq
+ txresp -status 200 -nolen -bodylen 1048576
+} -start
+
+haproxy h1 -conf {
+ global
+ log stderr len 4096 local0 debug
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option splice-request
+ option splice-response
+ log global
+ option httplog
+
+ listen li1
+ bind "fd@${li1}"
+ id 10
+ server srv1 ${s1_addr}:${s1_port}
+
+ listen li2
+ bind "fd@${li2}"
+ id 20
+ server srv2 ${s2_addr}:${s2_port}
+} -start
+
+
+client c1 -connect ${h1_li1_sock} {
+ txreq -method POST -url "/" -bodylen 1048576
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == "1048576"
+ expect resp.bodylen == 1048576
+} -run
+
+client c2 -connect ${h1_li2_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == <undef>
+ expect resp.bodylen == 1048576
+} -run
+
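+# The expectations below check the spliced byte counters: on li1 (id 10) both
+# the request and the response bodies are spliced, so the frontend (F.10) and
+# backend (B.10) h1_spliced_bytes_in/out counters must all be non-zero; on
+# li2 (id 20) only the server response is spliced, hence the client-to-server
+# counters are expected to stay at 0.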
+haproxy h1 -cli {
+ send "show stat typed"
+ expect ~ "F.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nF.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+ send "show stat typed"
+ expect ~ "B.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nB.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+
+ send "show stat typed"
+ expect ~ "F.20.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:0\nF.20.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+ send "show stat typed"
+ expect ~ "B.20.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nB.20.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:0"
+}
diff --git a/reg-tests/http-messaging/http_splicing_chunk.vtc b/reg-tests/http-messaging/http_splicing_chunk.vtc
new file mode 100644
index 0000000..e2e9f32
--- /dev/null
+++ b/reg-tests/http-messaging/http_splicing_chunk.vtc
@@ -0,0 +1,74 @@
+# This reg-test checks splicing support for chunked message in the H1 multiplexer
+
+varnishtest "A test to validate h1 splicing support for chunked messages"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(LINUX_SPLICE)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(FAST-FORWARD)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(SPLICE)'"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+server s1 {
+ rxreq
+ expect req.http.content-length == <undef>
+ expect req.http.transfer-encoding == "chunked"
+ expect req.bodylen == 500000
+
+ txresp -status 200 -nolen \
+ -hdr "Transfer-Encoding: chunked"
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ global
+ log stderr len 4096 local0 debug
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option splice-request
+ option splice-response
+ log global
+ option httplog
+
+ listen li1
+ bind "fd@${li1}"
+ id 10
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -connect ${h1_li1_sock} {
+ txreq -method POST -url "/" -nolen \
+ -hdr "Transfer-Encoding: chunked"
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 50000
+ chunkedlen 100000
+ chunkedlen 300000
+ chunkedlen 0
+
+ rxresp
+ expect resp.http.content-length == <undef>
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 1024000
+} -run
+
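+# Both the chunked request and the chunked response must have been spliced,
+# so the four h1_spliced_bytes_in/out counters (F.10 and B.10) are expected
+# to be non-zero.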
+haproxy h1 -cli {
+ send "show stat typed"
+ expect ~ "F.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nF.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+ send "show stat typed"
+ expect ~ "B.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nB.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+}
diff --git a/reg-tests/http-messaging/http_transfer_encoding.vtc b/reg-tests/http-messaging/http_transfer_encoding.vtc
new file mode 100644
index 0000000..322dfe2
--- /dev/null
+++ b/reg-tests/http-messaging/http_transfer_encoding.vtc
@@ -0,0 +1,202 @@
+varnishtest "A test to validate Transfer-Encoding header conformance to the spec"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.content-length == <undef>
+ expect req.http.transfer-encoding == "chunked"
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp -status 200
+
+ accept
+ rxreq
+ expect req.http.content-length == <undef>
+ expect req.http.transfer-encoding == "chunked"
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp -status 200
+
+ accept
+ rxreq
+ send "HTTP/1.0 200 Ok\r\n"
+ send "Transfer-Encoding: chunked\r\n\r\n"
+ send "0\r\n\r\n"
+
+ accept
+ rxreq
+ send "HTTP/1.1 200 Ok\r\n"
+ send "Transfer-Encoding: chunked\r\n"
+ send "Content-Length: 30\r\n\r\n"
+ send "0\r\n\r\nResponse splitting attach"
+
+ accept
+ rxreq
+ expect req.url == "/1"
+ expect req.http.transfer-encoding == "chunked"
+ expect req.http.te == "trailers"
+ txresp
+
+ rxreq
+ expect req.url == "/2"
+ expect req.http.transfer-encoding == "chunked"
+ expect req.http.te == <undef>
+ txresp
+
+ rxreq
+ expect req.url == "/3"
+ expect req.http.transfer-encoding == "chunked"
+ expect req.http.te == <undef>
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp -nolen \
+ -hdr "Transfer-Encoding: chunked, chunked" \
+ -body "0\r\n\r\n"
+
+ accept
+ rxreq
+ txresp -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip, chunked" \
+ -body "0\r\n\r\n"
+
+ accept
+ rxreq
+ txresp -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip" \
+ -body "0\r\n\r\n"
+
+ accept
+ rxreq
+ txresp \
+ -hdr "Transfer-Encoding: gzip"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ server s1 ${s1_addr}:${s1_port}
+
+ listen fe2
+ bind "fd@${fe2}"
+ server s2 ${s2_addr}:${s2_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -method POST -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "Content-Length: 31" \
+ -body "0\r\n\r\nGET /smuggled HTTP/1.1\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.connection == "close"
+ expect_close
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ send "POST / HTTP/1.0\r\n"
+ send "Transfer-Encoding: chunked\r\n\r\n"
+ send "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+ expect_close
+} -run
+
+client c3 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == <undef>
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 0
+ expect resp.body == ""
+ expect_close
+} -run
+
+client c4 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == <undef>
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 0
+ expect resp.body == ""
+} -run
+
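+# Only the bare "trailers" token of the TE request header may be forwarded to
+# the server; any other value (even a weighted "trailers") must be dropped.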
+client c5 -connect ${h1_fe1_sock} {
+ txreq -method POST -url "/1" -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "TE: trailers, gzip" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+
+ txreq -method POST -url "/2" -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "TE: gzip" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+
+ txreq -method POST -url "/3" -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "TE: trailers;q=0.5" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+} -run
+
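+# Requests where the chunked coding is duplicated, missing or not the final
+# transfer coding must be rejected with a 400.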
+client c6 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: chunked, chunked" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c7 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip, chunked" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c8 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c9 -connect ${h1_fe1_sock} {
+ txreq \
+ -hdr "Transfer-Encoding: gzip"
+ rxresp
+ expect resp.status == 400
+} -run
+
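+# Here chunked is the final coding, so the framing is valid, but the extra
+# gzip coding is not supported and the request is rejected with a 422.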
+client c10 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: gzip, chunked" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 422
+} -run
+
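+# The same invalid Transfer-Encoding values coming from the server (s2) must
+# lead to a 502 returned to the client.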
+client c11 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 502
+} -run -repeat 4
diff --git a/reg-tests/http-messaging/http_wait_for_body.vtc b/reg-tests/http-messaging/http_wait_for_body.vtc
new file mode 100644
index 0000000..a9f8191
--- /dev/null
+++ b/reg-tests/http-messaging/http_wait_for_body.vtc
@@ -0,0 +1,171 @@
+varnishtest "A test for the wait-for-body HTTP action"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+#REGTEST_TYPE=slow
+
+server s1 {
+ rxreq
+ expect req.bodylen == 1001
+ txresp
+
+ rxreq
+ expect req.bodylen == 1001
+ txresp
+} -start
+
+
+server s2 {
+ rxreq
+ send "HTTP/1.1 200 OK\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+
+ expect_close
+ accept
+
+ rxreq
+ send "HTTP/1.1 200 OK\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.01
+ send "1"
+
+ rxreq
+ send "HTTP/1.1 201 OK\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.1
+ send "1"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request wait-for-body time 100ms if { path /a }
+ http-request wait-for-body time 100ms at-least 1000 if { path /b }
+ use_backend be1
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend fe2
+ bind "fd@${fe2}"
+ use_backend be2
+
+ backend be2
+ http-response wait-for-body time 100ms if { status eq 200 }
+ http-response wait-for-body time 100ms at-least 1000 if { status eq 201 }
+ server srv1 ${s2_addr}:${s2_port}
+} -start
+
+
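+# Only 1000 of the 1001 announced bytes are sent: wait-for-body times out on
+# the request and haproxy replies with a 408.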
+client c1 -connect ${h1_fe1_sock} {
+ send "GET /a HTTP/1.1\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ rxresp
+ expect resp.status == 408
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ send "GET /a HTTP/1.1\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.01
+ send "1"
+ rxresp
+ expect resp.status == 200
+
+ send "GET /b HTTP/1.1\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.1
+ send "1"
+ rxresp
+ expect resp.status == 200
+} -run
+
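+# The first s2 response never sends its last byte: wait-for-body times out on
+# the response and haproxy replies with a 504.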
+client c3 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 504
+} -run
+
+client c4 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 1001
+
+ txreq
+ rxresp
+ expect resp.status == 201
+ expect resp.bodylen == 1001
+} -run
diff --git a/reg-tests/http-messaging/protocol_upgrade.vtc b/reg-tests/http-messaging/protocol_upgrade.vtc
new file mode 100644
index 0000000..ebb6328
--- /dev/null
+++ b/reg-tests/http-messaging/protocol_upgrade.vtc
@@ -0,0 +1,228 @@
+# This reg-test checks the full support of HTTP protocol upgrade, using a GET
+# method and a Connection: Upgrade header. The equivalent mechanism has been
+# defined in rfc8441 for HTTP/2 using CONNECT and a new pseudo-header
+# :protocol. Check that haproxy properly handles the h1/h2 translation of
+# protocol upgrade requests and responses.
+
+varnishtest "h1/h2 support for protocol upgrade test"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+# http/1.1 server
+server srv_h1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "custom_protocol"
+
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+} -repeat 2 -start
+
+# http2 server
+server srv_h2 {
+ rxpri
+
+ stream 0 {
+ # manually send RFC8441 SETTINGS_ENABLE_CONNECT_PROTOCOL
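+ # (frame: length 6, type 0x4 SETTINGS, flags 0, stream 0; identifier 0x8, value 1)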
+ sendhex "00 00 06 04 00 00 00 00 00 00 08 00 00 00 01"
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ rxhdrs
+ expect req.method == "CONNECT"
+ expect req.http.:scheme == "https"
+ expect req.http.:path == "/"
+ expect req.http.:authority == "127.0.0.1"
+ expect req.http.:protocol == "custom_protocol"
+
+ txresp \
+ -status 200
+ } -run
+} -repeat 2 -start
+
+# http2 server without support for RFC8441 (the setting is not announced)
+server srv_h2_no_ws {
+ rxpri
+
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ rxrst
+ } -run
+} -start
+
+# http2 server without support for RFC8441: the setting is announced with a value of 0
+server srv_h2_no_ws2 {
+ rxpri
+
+ stream 0 {
+ # manually send RFC8441 SETTINGS_ENABLE_CONNECT_PROTOCOL with a value of 0
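+ # (frame: length 6, type 0x4 SETTINGS, flags 0, stream 0; identifier 0x8, value 0)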
+ sendhex "00 00 06 04 00 00 00 00 00 00 08 00 00 00 00"
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ rxrst
+ } -run
+} -start
+
+haproxy hap -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ # h1 frontend connected to h2 frontend
+ listen frt_h1_h2
+ bind "fd@${frt_h1_h2}"
+ server feh2_srv ${hap_frt_h2_addr}:${hap_frt_h2_port} proto h2
+
+ # h2 frontend connected to srv_h1
+ listen frt_h2
+ bind "fd@${frt_h2}" proto h2
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ # h1 frontend connected to srv_h2
+ listen frt_h1
+ bind "fd@${frt_h1}"
+ server srv_h2 ${srv_h2_addr}:${srv_h2_port} proto h2
+
+ # h1 frontend connected to srv_h2_no_ws
+ listen frt_h1_no_ws
+ bind "fd@${frt_h1_no_ws}"
+ server srv_h2_no_ws ${srv_h2_no_ws_addr}:${srv_h2_no_ws_port} proto h2
+
+ # h1 frontend connected to srv_h2_no_ws2
+ listen frt_h1_no_ws2
+ bind "fd@${frt_h1_no_ws2}"
+ server srv_h2_no_ws2 ${srv_h2_no_ws2_addr}:${srv_h2_no_ws2_port} proto h2
+
+ # h2 frontend connected to h1 frontend
+ listen frt_h2_h1
+ bind "fd@${frt_h2_h1}" proto h2
+ server frt_h1 ${hap_frt_h1_addr}:${hap_frt_h1_port}
+} -start
+
+# connect to h1 translation frontend
+client c1_h1_h2 -connect ${hap_frt_h1_h2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 101
+ expect resp.http.connection == "upgrade"
+ expect resp.http.upgrade == "custom_protocol"
+} -run
+
+# connect to h2 server frontend
+client c2_h2 -connect ${hap_frt_h2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "custom_protocol"
+
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# connect to h2 translation frontend
+client c3_h2_h1 -connect ${hap_frt_h2_h1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "custom_protocol"
+
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# connect to h1 server frontend
+client c4_h1 -connect ${hap_frt_h1_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 101
+ expect resp.http.connection == "upgrade"
+ expect resp.http.upgrade == "custom_protocol"
+} -run
+
+# connect via h1 server frontend to h2 server without RFC8441 support
+client c5 -connect ${hap_frt_h1_no_ws_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 502
+} -run
+
+# connect via h1 server frontend to h2 server announcing RFC8441 support with a value of 0
+client c6 -connect ${hap_frt_h1_no_ws2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 502
+} -run
diff --git a/reg-tests/http-messaging/scheme_based_normalize.vtc b/reg-tests/http-messaging/scheme_based_normalize.vtc
new file mode 100644
index 0000000..3edbafb
--- /dev/null
+++ b/reg-tests/http-messaging/scheme_based_normalize.vtc
@@ -0,0 +1,125 @@
+varnishtest "scheme-based normalization (rfc3986 6.2.3)"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+syslog S1 -level info {
+ recv
+ expect ~ "^.* uri: GET http://hostname/ HTTP/2.0; host: {hostname}$"
+
+ recv
+ expect ~ "^.* uri: GET http://hostname:8080/ HTTP/2.0; host: {hostname:8080}$"
+
+ recv
+ expect ~ "^.* uri: GET https://hostname/ HTTP/2.0; host: {hostname}$"
+
+ recv
+ expect ~ "^.* uri: GET https://hostname:80/ HTTP/2.0; host: {hostname:80}$"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}" proto h2
+
+ http-request capture req.hdr(host) len 512
+ log-format "uri: %r; host: %hr"
+ log ${S1_addr}:${S1_port} len 2048 local0 debug err
+
+ http-request return status 200
+} -start
+
+# default port 80 with http scheme => should be normalized
+client c1 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "hostname:80"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# port 8080 with http scheme => no normalization
+client c2 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "hostname:8080"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# default port 443 with https scheme => should be normalized
+client c3 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/" \
+ -hdr ":authority" "hostname:443"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# port 80 with https scheme => no normalization
+client c4 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/" \
+ -hdr ":authority" "hostname:80"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+syslog S1 -wait
diff --git a/reg-tests/http-messaging/srv_ws.vtc b/reg-tests/http-messaging/srv_ws.vtc
new file mode 100644
index 0000000..f0f5f8b
--- /dev/null
+++ b/reg-tests/http-messaging/srv_ws.vtc
@@ -0,0 +1,180 @@
+# This reg-test checks websocket support with regard to the server keyword
+# 'ws'
+
+varnishtest "h2 backend websocket management via server keyword"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature ignore_unknown_macro
+
+# haproxy server
+haproxy hapsrv -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ bind "fd@${fessl}" ssl crt ${testdir}/common.pem alpn h2,http/1.1
+ capture request header sec-websocket-key len 128
+ http-request set-var(txn.ver) req.ver
+ use_backend be
+
+ backend be
+ # define websocket ACL
+ acl ws_handshake hdr(upgrade) -m str websocket
+
+ # handle non-ws streams
+ http-request return status 200 if !ws_handshake
+
+ # handle ws streams
+ #capture request header sec-websocket-key len 128
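+ # rfc6455: sec-websocket-accept = base64(sha1(key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))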
+ http-request return status 200 hdr connection upgrade hdr upgrade websocket hdr sec-websocket-accept "%[capture.req.hdr(0),concat(258EAFA5-E914-47DA-95CA-C5AB0DC85B11,,),sha1,base64]" if ws_handshake
+ http-after-response set-status 101 if { status eq 200 } { res.hdr(upgrade) -m str websocket }
+ http-after-response set-header x-backend-protocol "%[var(txn.ver)]"
+} -start
+
+# haproxy LB
+haproxy hap -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ # proto X ws h1 -> websocket on h1
+ listen li
+ bind "fd@${li}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port} proto h2 ws h1
+
+ # proto X ws h2 -> websocket on h2
+ listen lih2
+ bind "fd@${lih2}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port} proto h2 ws h2
+
+ # alpn h2,http/1.1 ws h2 -> websocket on h2
+ listen lisslh2
+ bind "fd@${lisslh2}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2,http/1.1 ws h2
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+
+ # ws auto -> websocket on h1
+ listen liauto
+ bind "fd@${liauto}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port}
+
+ # alpn h2,http/1.1 ws auto -> websocket on h1
+ listen lissl
+ bind "fd@${lissl}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2,http/1.1 ws auto
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+
+ # alpn h2,http/1.1 (ws defaults to auto) -> websocket on h1
+ listen lisslauto
+ bind "fd@${lisslauto}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2,http/1.1
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+
+ # proto h2 ws auto -> websocket on h2
+ listen liauto2
+ bind "fd@${liauto2}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port} proto h2
+
+ # alpn h2 ws auto -> websocket on h2
+ listen lisslauto2
+ bind "fd@${lisslauto2}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2 ws auto
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+} -start
+
+client c1 -connect ${hap_li_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "1.1"
+} -run
+
+client c1.2 -connect ${hap_lih2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "2.0"
+} -run
+
+client c1.3 -connect ${hap_liauto_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "1.1"
+} -run
+
+client c1.4 -connect ${hap_liauto2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "2.0"
+} -run
+
+client c2 -connect ${hap_lisslauto_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-alpn == "http/1.1"
+} -run
+
+client c2.2 -connect ${hap_lisslauto2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-alpn == "h2"
+} -run
+
+client c2.3 -connect ${hap_lisslh2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-alpn == "h2"
+} -run
diff --git a/reg-tests/http-messaging/truncated.vtc b/reg-tests/http-messaging/truncated.vtc
new file mode 100644
index 0000000..7579f6d
--- /dev/null
+++ b/reg-tests/http-messaging/truncated.vtc
@@ -0,0 +1,101 @@
+varnishtest "HTTP response size tests: H2->H1 (HTX and legacy mode)"
+#REQUIRE_VERSION=1.9
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -nolen \
+ -hdr "Transfer-encoding: chunked"
+ # -bodylen 16300
+ #chunkedlen 16300
+ #delay 0.05
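+ # 20 chunks of 815 bytes each: 16300 bytes in total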
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ #log stdout format raw daemon
+ mode http
+ option http-buffer-request
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ listen feh1
+ bind "fd@${feh1}"
+ bind "fd@${feh2}" proto h2
+ http-response add-header a b
+ #http-response del-header content-length
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ # first request is valid
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test1.html"
+ rxhdrs
+ #delay 0.1
+ expect resp.status == 200
+ rxdata -all
+ expect resp.bodylen == 16300
+ #expect resp.chunkedlen == 16300
+ } -run
+} -repeat 2 -run
diff --git a/reg-tests/http-messaging/websocket.vtc b/reg-tests/http-messaging/websocket.vtc
new file mode 100644
index 0000000..aed55fe
--- /dev/null
+++ b/reg-tests/http-messaging/websocket.vtc
@@ -0,0 +1,211 @@
+# This reg-test is used to check compliance with the websocket protocol as
+# defined in rfc6455.
+#
+# In particular, a request/response without a websocket key must be rejected
+# by haproxy. Note that in the tested case (h1 on both sides), haproxy does
+# not validate the server's key but only checks its presence.
+#
+# For the h2 client / h1 server case, haproxy adds the key itself and
+# validates it. However, there is currently no easy way to check this case
+# with vtest.
+
+varnishtest "WebSocket test"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+# valid websocket server
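+# (the key/accept pair below is the sample handshake from rfc6455)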
+server s1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "websocket"
+ expect req.http.sec-websocket-key == "dGhlIHNhbXBsZSBub25jZQ=="
+
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
+
+ recv 4
+ send "PONG"
+} -start
+
+# non-conformant server: no websocket key
+server s2 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "websocket"
+
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket"
+} -start
+
+# haproxy instance used as a server
+# generate a http/1.1 websocket response with the valid key
+haproxy hap_srv -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+
+ # reject if the request does not contain a websocket key
+ acl ws_handshake hdr(sec-websocket-key) -m found
+ http-request reject unless ws_handshake
+
+ # return a valid websocket handshake response
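+ # (sec-websocket-accept is derived from the captured key as defined in rfc6455)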
+ capture request header sec-websocket-key len 128
+ http-request return status 200 hdr connection upgrade hdr upgrade websocket hdr sec-websocket-accept "%[capture.req.hdr(0),concat(258EAFA5-E914-47DA-95CA-C5AB0DC85B11,,),sha1,base64]"
+ http-after-response set-status 101 if { status eq 200 }
+} -start
+
+# haproxy instance used as a server
+# generate a http/1.1 websocket response with an invalid key
+haproxy hap_srv_bad_key -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+
+ # reject if the request does not contain a websocket key
+ acl ws_handshake hdr(sec-websocket-key) -m found
+ http-request reject unless ws_handshake
+
+ # return an invalid websocket handshake response
+ capture request header sec-websocket-key len 128
+ http-request return status 200 hdr connection upgrade hdr upgrade websocket hdr sec-websocket-accept "invalid_key"
+ http-after-response set-status 101 if { status eq 200 }
+} -start
+
+haproxy hap -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ server s1 ${s1_addr}:${s1_port}
+
+ listen fe2
+ bind "fd@${fe2}"
+ server s2 ${s2_addr}:${s2_port}
+
+ listen fe3
+ bind "fd@${fe3}" proto h2
+ server hap_srv ${hap_srv_fe1_addr}:${hap_srv_fe1_port}
+
+ listen fe4
+ bind "fd@${fe4}" proto h2
+ server hap_srv_bad_key ${hap_srv_bad_key_fe1_addr}:${hap_srv_bad_key_fe1_port}
+} -start
+
+# standard request
+client c1 -connect ${hap_fe1_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.connection == "upgrade"
+ expect resp.http.upgrade == "websocket"
+ expect resp.http.sec-websocket-accept == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
+
+ send "PING"
+ recv 4
+} -run
+
+# missing websocket key
+client c2 -connect ${hap_fe1_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# missing key on server side
+client c3 -connect ${hap_fe2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+
+ rxresp
+ expect resp.status == 502
+} -run
+
+# connect with http/2 to an http/1.1 websocket server
+# the key must be provided by haproxy
+client c4 -connect ${hap_fe3_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "websocket"
+
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# connect with http/2 to an http/1.1 websocket server
+# however, the server will respond with an invalid key
+# haproxy is responsible for rejecting the request and returning a 502 to the client
+client c5 -connect ${hap_fe4_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "websocket"
+
+ rxhdrs
+ expect resp.status == 502
+ } -run
+} -run